Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'usb-for-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-next

usb: changes for v4.18 merge window

A total of 98 non-merge commits, the biggest part being in dwc3 this
time around with a large refactoring of dwc3's transfer handling code.

We also have a new driver for Aspeed virtual hub controller.

Apart from that, just a list of miscellaneous fixes all over the place.

+5512 -833
+21
Documentation/devicetree/bindings/usb/dwc3.txt
··· 7 7 - compatible: must be "snps,dwc3" 8 8 - reg : Address and length of the register set for the device 9 9 - interrupts: Interrupts used by the dwc3 controller. 10 + - clock-names: should contain "ref", "bus_early", "suspend" 11 + - clocks: list of phandle and clock specifier pairs corresponding to 12 + entries in the clock-names property. 13 + 14 + Exception for clocks: 15 + clocks are optional if the parent node (i.e. glue-layer) is compatible to 16 + one of the following: 17 + "amlogic,meson-axg-dwc3" 18 + "amlogic,meson-gxl-dwc3" 19 + "cavium,octeon-7130-usb-uctl" 20 + "qcom,dwc3" 21 + "samsung,exynos5250-dwusb3" 22 + "samsung,exynos7-dwusb3" 23 + "sprd,sc9860-dwc3" 24 + "st,stih407-dwc3" 25 + "ti,am437x-dwc3" 26 + "ti,dwc3" 27 + "ti,keystone-dwc3" 28 + "rockchip,rk3399-dwc3" 29 + "xlnx,zynqmp-dwc3" 10 30 11 31 Optional properties: 12 32 - usb-phy : array of phandle for the PHY device. The first element ··· 35 15 - phys: from the *Generic PHY* bindings 36 16 - phy-names: from the *Generic PHY* bindings; supported names are "usb2-phy" 37 17 or "usb3-phy". 18 + - resets: a single pair of phandle and reset specifier 38 19 - snps,usb3_lpm_capable: determines if platform is USB3 LPM capable 39 20 - snps,disable_scramble_quirk: true when SW should disable data scrambling. 40 21 Only really useful for FPGA builds.
+63 -22
Documentation/devicetree/bindings/usb/qcom,dwc3.txt
··· 1 1 Qualcomm SuperSpeed DWC3 USB SoC controller 2 2 3 3 Required properties: 4 - - compatible: should contain "qcom,dwc3" 4 + - compatible: Compatible list, contains 5 + "qcom,dwc3" 6 + "qcom,msm8996-dwc3" for msm8996 SOC. 7 + "qcom,sdm845-dwc3" for sdm845 SOC. 8 + - reg: Offset and length of register set for QSCRATCH wrapper 9 + - power-domains: specifies a phandle to PM domain provider node 5 10 - clocks: A list of phandle + clock-specifier pairs for the 6 11 clocks listed in clock-names 7 - - clock-names: Should contain the following: 12 + - clock-names: Should contain the following: 8 13 "core" Master/Core clock, have to be >= 125 MHz for SS 9 14 operation and >= 60MHz for HS operation 15 + "mock_utmi" Mock utmi clock needed for ITP/SOF generation in 16 + host mode. Its frequency should be 19.2MHz. 17 + "sleep" Sleep clock, used for wakeup when USB3 core goes 18 + into low power mode (U3). 10 19 11 20 Optional clocks: 12 - "iface" System bus AXI clock. Not present on all platforms 13 - "sleep" Sleep clock, used when USB3 core goes into low 14 - power mode (U3). 21 + "iface" System bus AXI clock. 22 + Not present on "qcom,msm8996-dwc3" compatible. 23 + "cfg_noc" System Config NOC clock. 24 + Not present on "qcom,msm8996-dwc3" compatible. 25 + - assigned-clocks: Should be: 26 + MOCK_UTMI_CLK 27 + MASTER_CLK 28 + - assigned-clock-rates: Should be: 29 + 19.2MHz (19200000) for MOCK_UTMI_CLK 30 + >=125MHz (125000000) for MASTER_CLK in SS mode 31 + >=60MHz (60000000) for MASTER_CLK in HS mode 32 + 33 + Optional properties: 34 + - resets: Phandle to reset control that resets core and wrapper. 35 + - interrupts: specifies interrupts from controller wrapper used 36 + to wakeup from low power/suspend state. 
Must contain 37 + one or more entry for interrupt-names property 38 + - interrupt-names: Must include the following entries: 39 + - "hs_phy_irq": The interrupt that is asserted when a 40 + wakeup event is received on USB2 bus 41 + - "ss_phy_irq": The interrupt that is asserted when a 42 + wakeup event is received on USB3 bus 43 + - "dm_hs_phy_irq" and "dp_hs_phy_irq": Separate 44 + interrupts for any wakeup event on DM and DP lines 45 + - qcom,select-utmi-as-pipe-clk: if present, disable USB3 pipe_clk requirement. 46 + Used when dwc3 operates without SSPHY and only 47 + HS/FS/LS modes are supported. 15 48 16 49 Required child node: 17 50 A child node must exist to represent the core DWC3 IP block. The name of 18 51 the node is not important. The content of the node is defined in dwc3.txt. 19 52 20 53 Phy documentation is provided in the following places: 21 - Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt 54 + Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt - USB3 QMP PHY 55 + Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt - USB2 QUSB2 PHY 22 56 23 57 Example device nodes: 24 58 25 59 hs_phy: phy@100f8800 { 26 - compatible = "qcom,dwc3-hs-usb-phy"; 27 - reg = <0x100f8800 0x30>; 28 - clocks = <&gcc USB30_0_UTMI_CLK>; 29 - clock-names = "ref"; 30 - #phy-cells = <0>; 31 - 60 + compatible = "qcom,qusb2-v2-phy"; 61 + ... 32 62 }; 33 63 34 64 ss_phy: phy@100f8830 { 35 - compatible = "qcom,dwc3-ss-usb-phy"; 36 - reg = <0x100f8830 0x30>; 37 - clocks = <&gcc USB30_0_MASTER_CLK>; 38 - clock-names = "ref"; 39 - #phy-cells = <0>; 40 - 65 + compatible = "qcom,qmp-v3-usb3-phy"; 66 + ... 
41 67 }; 42 68 43 - usb3_0: usb30@0 { 69 + usb3_0: usb30@a6f8800 { 44 70 compatible = "qcom,dwc3"; 71 + reg = <0xa6f8800 0x400>; 45 72 #address-cells = <1>; 46 73 #size-cells = <1>; 47 - clocks = <&gcc USB30_0_MASTER_CLK>; 48 - clock-names = "core"; 49 - 50 74 ranges; 51 75 76 + interrupts = <0 131 0>, <0 486 0>, <0 488 0>, <0 489 0>; 77 + interrupt-names = "hs_phy_irq", "ss_phy_irq", 78 + "dm_hs_phy_irq", "dp_hs_phy_irq"; 79 + 80 + clocks = <&gcc GCC_USB30_PRIM_MASTER_CLK>, 81 + <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, 82 + <&gcc GCC_USB30_PRIM_SLEEP_CLK>; 83 + clock-names = "core", "mock_utmi", "sleep"; 84 + 85 + assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, 86 + <&gcc GCC_USB30_PRIM_MASTER_CLK>; 87 + assigned-clock-rates = <19200000>, <133000000>; 88 + 89 + resets = <&gcc GCC_USB30_PRIM_BCR>; 90 + reset-names = "core_reset"; 91 + power-domains = <&gcc USB30_PRIM_GDSC>; 92 + qcom,select-utmi-as-pipe-clk; 52 93 53 94 dwc3@10000000 { 54 95 compatible = "snps,dwc3";
+1 -2
Documentation/driver-api/usb/dwc3.rst
··· 674 674 __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e', 675 675 __entry->flags & DWC3_EP_STALL ? 'S' : 's', 676 676 __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w', 677 - __entry->flags & DWC3_EP_BUSY ? 'B' : 'b', 677 + __entry->flags & DWC3_EP_TRANSFER_STARTED ? 'B' : 'b', 678 678 __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p', 679 - __entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm', 680 679 __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e', 681 680 __entry->direction ? '<' : '>' 682 681 )
+7
drivers/usb/dwc2/core.c
··· 419 419 /** 420 420 * dwc2_iddig_filter_enabled() - Returns true if the IDDIG debounce 421 421 * filter is enabled. 422 + * 423 + * @hsotg: Programming view of DWC_otg controller 422 424 */ 423 425 static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg) 424 426 { ··· 566 564 * If a force is done, it requires a IDDIG debounce filter delay if 567 565 * the filter is configured and enabled. We poll the current mode of 568 566 * the controller to account for this delay. 567 + * 568 + * @hsotg: Programming view of DWC_otg controller 569 + * @host: Host mode flag 569 570 */ 570 571 void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host) 571 572 { ··· 615 610 * or not because the value of the connector ID status is affected by 616 611 * the force mode. We only need to call this once during probe if 617 612 * dr_mode == OTG. 613 + * 614 + * @hsotg: Programming view of DWC_otg controller 618 615 */ 619 616 static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) 620 617 {
+152 -36
drivers/usb/dwc2/core.h
··· 164 164 * and has yet to be completed (maybe due to data move, or simply 165 165 * awaiting an ack from the core all the data has been completed). 166 166 * @debugfs: File entry for debugfs file for this endpoint. 167 - * @lock: State lock to protect contents of endpoint. 168 167 * @dir_in: Set to true if this endpoint is of the IN direction, which 169 168 * means that it is sending data to the Host. 170 169 * @index: The index for the endpoint registers. 171 170 * @mc: Multi Count - number of transactions per microframe 172 - * @interval - Interval for periodic endpoints, in frames or microframes. 171 + * @interval: Interval for periodic endpoints, in frames or microframes. 173 172 * @name: The name array passed to the USB core. 174 173 * @halted: Set if the endpoint has been halted. 175 174 * @periodic: Set if this is a periodic ep, such as Interrupt ··· 177 178 * @desc_list_dma: The DMA address of descriptor chain currently in use. 178 179 * @desc_list: Pointer to descriptor DMA chain head currently in use. 179 180 * @desc_count: Count of entries within the DMA descriptor chain of EP. 180 - * @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1. 181 181 * @next_desc: index of next free descriptor in the ISOC chain under SW control. 182 + * @compl_desc: index of next descriptor to be completed by xFerComplete 182 183 * @total_data: The total number of data bytes done. 183 184 * @fifo_size: The size of the FIFO (for periodic IN endpoints) 185 + * @fifo_index: For Dedicated FIFO operation, only FIFO0 can be used for EP0. 184 186 * @fifo_load: The amount of data loaded into the FIFO (periodic IN) 185 187 * @last_load: The offset of data for the last start of request. 
186 188 * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN ··· 231 231 struct dwc2_dma_desc *desc_list; 232 232 u8 desc_count; 233 233 234 - unsigned char isoc_chain_num; 235 234 unsigned int next_desc; 235 + unsigned int compl_desc; 236 236 237 237 char name[10]; 238 238 }; ··· 380 380 * is FS. 381 381 * 0 - No (default) 382 382 * 1 - Yes 383 + * @ipg_isoc_en: Indicates the IPG supports is enabled or disabled. 384 + * 0 - Disable (default) 385 + * 1 - Enable 386 + * @acg_enable: For enabling Active Clock Gating in the controller 387 + * 0 - No 388 + * 1 - Yes 383 389 * @ulpi_fs_ls: Make ULPI phy operate in FS/LS mode only 384 390 * 0 - No (default) 385 391 * 1 - Yes ··· 517 511 bool hird_threshold_en; 518 512 u8 hird_threshold; 519 513 bool activate_stm_fs_transceiver; 514 + bool ipg_isoc_en; 520 515 u16 max_packet_count; 521 516 u32 max_transfer_size; 522 517 u32 ahbcfg; ··· 555 548 * 556 549 * The values that are not in dwc2_core_params are documented below. 557 550 * 558 - * @op_mode Mode of Operation 551 + * @op_mode: Mode of Operation 559 552 * 0 - HNP- and SRP-Capable OTG (Host & Device) 560 553 * 1 - SRP-Capable OTG (Host & Device) 561 554 * 2 - Non-HNP and Non-SRP Capable OTG (Host & Device) ··· 563 556 * 4 - Non-OTG Device 564 557 * 5 - SRP-Capable Host 565 558 * 6 - Non-OTG Host 566 - * @arch Architecture 559 + * @arch: Architecture 567 560 * 0 - Slave only 568 561 * 1 - External DMA 569 562 * 2 - Internal DMA 570 - * @power_optimized Are power optimizations enabled? 
571 - * @num_dev_ep Number of device endpoints available 572 - * @num_dev_in_eps Number of device IN endpoints available 573 - * @num_dev_perio_in_ep Number of device periodic IN endpoints 574 - * available 575 - * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue 563 + * @ipg_isoc_en: This feature indicates that the controller supports 564 + * the worst-case scenario of Rx followed by Rx 565 + * Interpacket Gap (IPG) (32 bitTimes) as per the utmi 566 + * specification for any token following ISOC OUT token. 567 + * 0 - Don't support 568 + * 1 - Support 569 + * @power_optimized: Are power optimizations enabled? 570 + * @num_dev_ep: Number of device endpoints available 571 + * @num_dev_in_eps: Number of device IN endpoints available 572 + * @num_dev_perio_in_ep: Number of device periodic IN endpoints 573 + * available 574 + * @dev_token_q_depth: Device Mode IN Token Sequence Learning Queue 576 575 * Depth 577 576 * 0 to 30 578 - * @host_perio_tx_q_depth 577 + * @host_perio_tx_q_depth: 579 578 * Host Mode Periodic Request Queue Depth 580 579 * 2, 4 or 8 581 - * @nperio_tx_q_depth 580 + * @nperio_tx_q_depth: 582 581 * Non-Periodic Request Queue Depth 583 582 * 2, 4 or 8 584 - * @hs_phy_type High-speed PHY interface type 583 + * @hs_phy_type: High-speed PHY interface type 585 584 * 0 - High-speed interface not supported 586 585 * 1 - UTMI+ 587 586 * 2 - ULPI 588 587 * 3 - UTMI+ and ULPI 589 - * @fs_phy_type Full-speed PHY interface type 588 + * @fs_phy_type: Full-speed PHY interface type 590 589 * 0 - Full speed interface not supported 591 590 * 1 - Dedicated full speed interface 592 591 * 2 - FS pins shared with UTMI+ pins 593 592 * 3 - FS pins shared with ULPI pins 594 593 * @total_fifo_size: Total internal RAM for FIFOs (bytes) 595 - * @hibernation Is hibernation enabled? 596 - * @utmi_phy_data_width UTMI+ PHY data width 594 + * @hibernation: Is hibernation enabled? 
595 + * @utmi_phy_data_width: UTMI+ PHY data width 597 596 * 0 - 8 bits 598 597 * 1 - 16 bits 599 598 * 2 - 8 or 16 bits 600 599 * @snpsid: Value from SNPSID register 601 600 * @dev_ep_dirs: Direction of device endpoints (GHWCFG1) 602 - * @g_tx_fifo_size[] Power-on values of TxFIFO sizes 601 + * @g_tx_fifo_size: Power-on values of TxFIFO sizes 602 + * @dma_desc_enable: When DMA mode is enabled, specifies whether to use 603 + * address DMA mode or descriptor DMA mode for accessing 604 + * the data FIFOs. The driver will automatically detect the 605 + * value for this if none is specified. 606 + * 0 - Address DMA 607 + * 1 - Descriptor DMA (default, if available) 608 + * @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters 609 + * 1 - Allow dynamic FIFO sizing (default, if available) 610 + * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs 611 + * are enabled for non-periodic IN endpoints in device 612 + * mode. 613 + * @host_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO 614 + * in host mode when dynamic FIFO sizing is enabled 615 + * 16 to 32768 616 + * Actual maximum value is autodetected and also 617 + * the default. 618 + * @host_perio_tx_fifo_size: Number of 4-byte words in the periodic Tx FIFO in 619 + * host mode when dynamic FIFO sizing is enabled 620 + * 16 to 32768 621 + * Actual maximum value is autodetected and also 622 + * the default. 623 + * @max_transfer_size: The maximum transfer size supported, in bytes 624 + * 2047 to 65,535 625 + * Actual maximum value is autodetected and also 626 + * the default. 627 + * @max_packet_count: The maximum number of packets in a transfer 628 + * 15 to 511 629 + * Actual maximum value is autodetected and also 630 + * the default. 631 + * @host_channels: The number of host channel registers to use 632 + * 1 to 16 633 + * Actual maximum value is autodetected and also 634 + * the default. 
635 + * @dev_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO 636 + * in device mode when dynamic FIFO sizing is enabled 637 + * 16 to 32768 638 + * Actual maximum value is autodetected and also 639 + * the default. 640 + * @i2c_enable: Specifies whether to use the I2Cinterface for a full 641 + * speed PHY. This parameter is only applicable if phy_type 642 + * is FS. 643 + * 0 - No (default) 644 + * 1 - Yes 645 + * @acg_enable: For enabling Active Clock Gating in the controller 646 + * 0 - Disable 647 + * 1 - Enable 648 + * @lpm_mode: For enabling Link Power Management in the controller 649 + * 0 - Disable 650 + * 1 - Enable 651 + * @rx_fifo_size: Number of 4-byte words in the Rx FIFO when dynamic 652 + * FIFO sizing is enabled 16 to 32768 653 + * Actual maximum value is autodetected and also 654 + * the default. 603 655 */ 604 656 struct dwc2_hw_params { 605 657 unsigned op_mode:3; ··· 688 622 unsigned hibernation:1; 689 623 unsigned utmi_phy_data_width:2; 690 624 unsigned lpm_mode:1; 625 + unsigned ipg_isoc_en:1; 691 626 u32 snpsid; 692 627 u32 dev_ep_dirs; 693 628 u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; ··· 709 642 * @gi2cctl: Backup of GI2CCTL register 710 643 * @glpmcfg: Backup of GLPMCFG register 711 644 * @gdfifocfg: Backup of GDFIFOCFG register 645 + * @pcgcctl: Backup of PCGCCTL register 646 + * @pcgcctl1: Backup of PCGCCTL1 register 647 + * @dtxfsiz: Backup of DTXFSIZ registers for each endpoint 712 648 * @gpwrdn: Backup of GPWRDN register 649 + * @valid: True if registers values backuped. 713 650 */ 714 651 struct dwc2_gregs_backup { 715 652 u32 gotgctl; ··· 746 675 * @doeptsiz: Backup of DOEPTSIZ register 747 676 * @doepdma: Backup of DOEPDMA register 748 677 * @dtxfsiz: Backup of DTXFSIZ registers for each endpoint 678 + * @valid: True if registers values backuped. 
749 679 */ 750 680 struct dwc2_dregs_backup { 751 681 u32 dcfg; ··· 770 698 * @hcfg: Backup of HCFG register 771 699 * @haintmsk: Backup of HAINTMSK register 772 700 * @hcintmsk: Backup of HCINTMSK register 773 - * @hptr0: Backup of HPTR0 register 701 + * @hprt0: Backup of HPTR0 register 774 702 * @hfir: Backup of HFIR register 775 703 * @hptxfsiz: Backup of HPTXFSIZ register 704 + * @valid: True if registers values backuped. 776 705 */ 777 706 struct dwc2_hregs_backup { 778 707 u32 hcfg; ··· 873 800 * @regs: Pointer to controller regs 874 801 * @hw_params: Parameters that were autodetected from the 875 802 * hardware registers 876 - * @core_params: Parameters that define how the core should be configured 803 + * @params: Parameters that define how the core should be configured 877 804 * @op_state: The operational State, during transitions (a_host=> 878 805 * a_peripheral and b_device=>b_host) this may not match 879 806 * the core, but allows the software to determine ··· 882 809 * - USB_DR_MODE_PERIPHERAL 883 810 * - USB_DR_MODE_HOST 884 811 * - USB_DR_MODE_OTG 885 - * @hcd_enabled Host mode sub-driver initialization indicator. 886 - * @gadget_enabled Peripheral mode sub-driver initialization indicator. 887 - * @ll_hw_enabled Status of low-level hardware resources. 812 + * @hcd_enabled: Host mode sub-driver initialization indicator. 813 + * @gadget_enabled: Peripheral mode sub-driver initialization indicator. 814 + * @ll_hw_enabled: Status of low-level hardware resources. 888 815 * @hibernated: True if core is hibernated 816 + * @frame_number: Frame number read from the core. For both device 817 + * and host modes. The value ranges are from 0 818 + * to HFNUM_MAX_FRNUM. 889 819 * @phy: The otg phy transceiver structure for phy control. 890 820 * @uphy: The otg phy transceiver structure for old USB phy 891 821 * control. 
··· 908 832 * interrupt 909 833 * @wkp_timer: Timer object for handling Wakeup Detected interrupt 910 834 * @lx_state: Lx state of connected device 911 - * @gregs_backup: Backup of global registers during suspend 912 - * @dregs_backup: Backup of device registers during suspend 913 - * @hregs_backup: Backup of host registers during suspend 835 + * @gr_backup: Backup of global registers during suspend 836 + * @dr_backup: Backup of device registers during suspend 837 + * @hr_backup: Backup of host registers during suspend 914 838 * 915 839 * These are for host mode: 916 840 * 917 841 * @flags: Flags for handling root port state changes 842 + * @flags.d32: Contain all root port flags 843 + * @flags.b: Separate root port flags from each other 844 + * @flags.b.port_connect_status_change: True if root port connect status 845 + * changed 846 + * @flags.b.port_connect_status: True if device connected to root port 847 + * @flags.b.port_reset_change: True if root port reset status changed 848 + * @flags.b.port_enable_change: True if root port enable status changed 849 + * @flags.b.port_suspend_change: True if root port suspend status changed 850 + * @flags.b.port_over_current_change: True if root port over current state 851 + * changed. 852 + * @flags.b.port_l1_change: True if root port l1 status changed 853 + * @flags.b.reserved: Reserved bits of root port register 918 854 * @non_periodic_sched_inactive: Inactive QHs in the non-periodic schedule. 919 855 * Transfers associated with these QHs are not currently 920 856 * assigned to a host channel. ··· 935 847 * assigned to a host channel. 936 848 * @non_periodic_qh_ptr: Pointer to next QH to process in the active 937 849 * non-periodic schedule 850 + * @non_periodic_sched_waiting: Waiting QHs in the non-periodic schedule. 851 + * Transfers associated with these QHs are not currently 852 + * assigned to a host channel. 938 853 * @periodic_sched_inactive: Inactive QHs in the periodic schedule. 
This is a 939 854 * list of QHs for periodic transfers that are _not_ 940 855 * scheduled for the next frame. Each QH in the list has an ··· 977 886 * @hs_periodic_bitmap: Bitmap used by the microframe scheduler any time the 978 887 * host is in high speed mode; low speed schedules are 979 888 * stored elsewhere since we need one per TT. 980 - * @frame_number: Frame number read from the core at SOF. The value ranges 981 - * from 0 to HFNUM_MAX_FRNUM. 982 889 * @periodic_qh_count: Count of periodic QHs, if using several eps. Used for 983 890 * SOF enable/disable. 984 891 * @free_hc_list: Free host channels in the controller. This is a list of ··· 987 898 * host channel is available for non-periodic transactions. 988 899 * @non_periodic_channels: Number of host channels assigned to non-periodic 989 900 * transfers 990 - * @available_host_channels Number of host channels available for the microframe 991 - * scheduler to use 901 + * @available_host_channels: Number of host channels available for the 902 + * microframe scheduler to use 992 903 * @hc_ptr_array: Array of pointers to the host channel descriptors. 993 904 * Allows accessing a host channel descriptor given the 994 905 * host channel number. This is useful in interrupt ··· 1011 922 * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos. 1012 923 * @num_of_eps: Number of available EPs (excluding EP0) 1013 924 * @debug_root: Root directory for debugfs. 1014 - * @debug_file: Main status file for debugfs. 1015 - * @debug_testmode: Testmode status file for debugfs. 1016 - * @debug_fifo: FIFO status file for debugfs. 1017 925 * @ep0_reply: Request used for ep0 reply. 1018 926 * @ep0_buff: Buffer for EP0 reply data, if needed. 1019 927 * @ctrl_buff: Buffer for EP0 control requests. 
··· 1025 939 * @ctrl_in_desc: EP0 IN data phase desc chain pointer 1026 940 * @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address 1027 941 * @ctrl_out_desc: EP0 OUT data phase desc chain pointer 1028 - * @eps: The endpoints being supplied to the gadget framework 942 + * @irq: Interrupt request line number 943 + * @clk: Pointer to otg clock 944 + * @reset: Pointer to dwc2 reset controller 945 + * @reset_ecc: Pointer to dwc2 optional reset controller in Stratix10. 946 + * @regset: A pointer to a struct debugfs_regset32, which contains 947 + * a pointer to an array of register definitions, the 948 + * array size and the base address where the register bank 949 + * is to be found. 950 + * @bus_suspended: True if bus is suspended 951 + * @last_frame_num: Number of last frame. Range from 0 to 32768 952 + * @frame_num_array: Used only if CONFIG_USB_DWC2_TRACK_MISSED_SOFS is 953 + * defined, for missed SOFs tracking. Array holds that 954 + * frame numbers, which not equal to last_frame_num +1 955 + * @last_frame_num_array: Used only if CONFIG_USB_DWC2_TRACK_MISSED_SOFS is 956 + * defined, for missed SOFs tracking. 957 + * If current_frame_number != last_frame_num+1 958 + * then last_frame_num added to this array 959 + * @frame_num_idx: Actual size of frame_num_array and last_frame_num_array 960 + * @dumped_frame_num_array: 1 - if missed SOFs frame numbers dumbed 961 + * 0 - if missed SOFs frame numbers not dumbed 962 + * @fifo_mem: Total internal RAM for FIFOs (bytes) 963 + * @fifo_map: Each bit intend for concrete fifo. If that bit is set, 964 + * then that fifo is used 965 + * @gadget: Represents a usb slave device 966 + * @connected: Used in slave mode. True if device connected with host 967 + * @eps_in: The IN endpoints being supplied to the gadget framework 968 + * @eps_out: The OUT endpoints being supplied to the gadget framework 969 + * @new_connection: Used in host mode. 
True if there are new connected 970 + * device 971 + * @enabled: Indicates the enabling state of controller 972 + * 1029 973 */ 1030 974 struct dwc2_hsotg { 1031 975 struct device *dev; ··· 1070 954 unsigned int gadget_enabled:1; 1071 955 unsigned int ll_hw_enabled:1; 1072 956 unsigned int hibernated:1; 957 + u16 frame_number; 1073 958 1074 959 struct phy *phy; 1075 960 struct usb_phy *uphy; ··· 1146 1029 u16 periodic_usecs; 1147 1030 unsigned long hs_periodic_bitmap[ 1148 1031 DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)]; 1149 - u16 frame_number; 1150 1032 u16 periodic_qh_count; 1151 1033 bool bus_suspended; 1152 1034 bool new_connection;
+8
drivers/usb/dwc2/core_intr.c
··· 778 778 goto out; 779 779 } 780 780 781 + /* Reading current frame number value in device or host modes. */ 782 + if (dwc2_is_device_mode(hsotg)) 783 + hsotg->frame_number = (dwc2_readl(hsotg->regs + DSTS) 784 + & DSTS_SOFFN_MASK) >> DSTS_SOFFN_SHIFT; 785 + else 786 + hsotg->frame_number = (dwc2_readl(hsotg->regs + HFNUM) 787 + & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; 788 + 781 789 gintsts = dwc2_read_common_intr(hsotg); 782 790 if (gintsts & ~GINTSTS_PRTINT) 783 791 retval = IRQ_HANDLED;
+1 -1
drivers/usb/dwc2/debug.h
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /** 2 + /* 3 3 * debug.h - Designware USB2 DRD controller debug header 4 4 * 5 5 * Copyright (C) 2015 Intel Corporation
+12 -10
drivers/usb/dwc2/debugfs.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /** 2 + /* 3 3 * debugfs.c - Designware USB2 DRD controller debugfs 4 4 * 5 5 * Copyright (C) 2015 Intel Corporation ··· 16 16 17 17 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 18 18 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 19 + 19 20 /** 20 - * testmode_write - debugfs: change usb test mode 21 - * @seq: The seq file to write to. 22 - * @v: Unused parameter. 23 - * 24 - * This debugfs entry modify the current usb test mode. 21 + * testmode_write() - change usb test mode state. 22 + * @file: The file to write to. 23 + * @ubuf: The buffer where user wrote. 24 + * @count: The ubuf size. 25 + * @ppos: Unused parameter. 25 26 */ 26 27 static ssize_t testmode_write(struct file *file, const char __user *ubuf, size_t 27 28 count, loff_t *ppos) ··· 56 55 } 57 56 58 57 /** 59 - * testmode_show - debugfs: show usb test mode state 60 - * @seq: The seq file to write to. 61 - * @v: Unused parameter. 58 + * testmode_show() - debugfs: show usb test mode state 59 + * @s: The seq file to write to. 60 + * @unused: Unused parameter. 62 61 * 63 62 * This debugfs entry shows which usb test mode is currently enabled. 64 63 */ ··· 369 368 dump_register(GINTSTS), 370 369 dump_register(GINTMSK), 371 370 dump_register(GRXSTSR), 372 - dump_register(GRXSTSP), 371 + /* Omit GRXSTSP */ 373 372 dump_register(GRXFSIZ), 374 373 dump_register(GNPTXFSIZ), 375 374 dump_register(GNPTXSTS), ··· 711 710 print_param(seq, p, phy_ulpi_ddr); 712 711 print_param(seq, p, phy_ulpi_ext_vbus); 713 712 print_param(seq, p, i2c_enable); 713 + print_param(seq, p, ipg_isoc_en); 714 714 print_param(seq, p, ulpi_fs_ls); 715 715 print_param(seq, p, host_support_fs_ls_low_power); 716 716 print_param(seq, p, host_ls_low_power_phy_clk);
+181 -161
drivers/usb/dwc2/gadget.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /** 2 + /* 3 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 4 * http://www.samsung.com 5 5 * ··· 107 107 /** 108 108 * dwc2_gadget_incr_frame_num - Increments the targeted frame number. 109 109 * @hs_ep: The endpoint 110 - * @increment: The value to increment by 111 110 * 112 111 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT. 113 112 * If an overrun occurs it will wrap the value and set the frame_overrun flag. ··· 189 190 190 191 /** 191 192 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode 193 + * 194 + * @hsotg: Programming view of the DWC_otg controller 192 195 */ 193 196 int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg) 194 197 { ··· 205 204 /** 206 205 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for 207 206 * device mode TX FIFOs 207 + * 208 + * @hsotg: Programming view of the DWC_otg controller 208 209 */ 209 210 int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) 210 211 { ··· 230 227 /** 231 228 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode 232 229 * TX FIFOs 230 + * 231 + * @hsotg: Programming view of the DWC_otg controller 233 232 */ 234 233 int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg) 235 234 { ··· 332 327 } 333 328 334 329 /** 330 + * dwc2_hsotg_ep_alloc_request - allocate USB rerequest structure 335 331 * @ep: USB endpoint to allocate request for. 336 332 * @flags: Allocation flags 337 333 * ··· 799 793 * @dma_buff: usb requests dma buffer. 800 794 * @len: usb request transfer length. 801 795 * 802 - * Finds out index of first free entry either in the bottom or up half of 803 - * descriptor chain depend on which is under SW control and not processed 804 - * by HW. 
Then fills that descriptor with the data of the arrived usb request, 796 + * Fills next free descriptor with the data of the arrived usb request, 805 797 * frame info, sets Last and IOC bits increments next_desc. If filled 806 798 * descriptor is not the first one, removes L bit from the previous descriptor 807 799 * status. ··· 814 810 u32 mask = 0; 815 811 816 812 maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); 817 - if (len > maxsize) { 818 - dev_err(hsotg->dev, "wrong len %d\n", len); 819 - return -EINVAL; 820 - } 821 813 822 - /* 823 - * If SW has already filled half of chain, then return and wait for 824 - * the other chain to be processed by HW. 825 - */ 826 - if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2) 827 - return -EBUSY; 828 - 829 - /* Increment frame number by interval for IN */ 830 - if (hs_ep->dir_in) 831 - dwc2_gadget_incr_frame_num(hs_ep); 832 - 833 - index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num + 834 - hs_ep->next_desc; 835 - 836 - /* Sanity check of calculated index */ 837 - if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) || 838 - (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) { 839 - dev_err(hsotg->dev, "wrong index %d for iso chain\n", index); 840 - return -EINVAL; 841 - } 842 - 814 + index = hs_ep->next_desc; 843 815 desc = &hs_ep->desc_list[index]; 816 + 817 + /* Check if descriptor chain full */ 818 + if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) == 819 + DEV_DMA_BUFF_STS_HREADY) { 820 + dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__); 821 + return 1; 822 + } 844 823 845 824 /* Clear L bit of previous desc if more than one entries in the chain */ 846 825 if (hs_ep->next_desc) ··· 852 865 desc->status &= ~DEV_DMA_BUFF_STS_MASK; 853 866 desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT); 854 867 868 + /* Increment frame number by interval for IN */ 869 + if (hs_ep->dir_in) 870 + dwc2_gadget_incr_frame_num(hs_ep); 871 + 855 872 /* Update index of last 
configured entry in the chain */ 856 873 hs_ep->next_desc++; 874 + if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC) 875 + hs_ep->next_desc = 0; 857 876 858 877 return 0; 859 878 } ··· 868 875 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA 869 876 * @hs_ep: The isochronous endpoint. 870 877 * 871 - * Prepare first descriptor chain for isochronous endpoints. Afterwards 878 + * Prepare descriptor chain for isochronous endpoints. Afterwards 872 879 * write DMA address to HW and enable the endpoint. 873 - * 874 - * Switch between descriptor chains via isoc_chain_num to give SW opportunity 875 - * to prepare second descriptor chain while first one is being processed by HW. 876 880 */ 877 881 static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) 878 882 { ··· 877 887 struct dwc2_hsotg_req *hs_req, *treq; 878 888 int index = hs_ep->index; 879 889 int ret; 890 + int i; 880 891 u32 dma_reg; 881 892 u32 depctl; 882 893 u32 ctrl; 894 + struct dwc2_dma_desc *desc; 883 895 884 896 if (list_empty(&hs_ep->queue)) { 885 897 dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__); 886 898 return; 887 899 } 888 900 901 + /* Initialize descriptor chain by Host Busy status */ 902 + for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) { 903 + desc = &hs_ep->desc_list[i]; 904 + desc->status = 0; 905 + desc->status |= (DEV_DMA_BUFF_STS_HBUSY 906 + << DEV_DMA_BUFF_STS_SHIFT); 907 + } 908 + 909 + hs_ep->next_desc = 0; 889 910 list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) { 890 911 ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma, 891 912 hs_req->req.length); 892 - if (ret) { 893 - dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__); 913 + if (ret) 894 914 break; 895 - } 896 915 } 897 916 917 + hs_ep->compl_desc = 0; 898 918 depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); 899 919 dma_reg = hs_ep->dir_in ? 
DIEPDMA(index) : DOEPDMA(index); 900 920 ··· 914 914 ctrl = dwc2_readl(hsotg->regs + depctl); 915 915 ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK; 916 916 dwc2_writel(ctrl, hsotg->regs + depctl); 917 - 918 - /* Switch ISOC descriptor chain number being processed by SW*/ 919 - hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1; 920 - hs_ep->next_desc = 0; 921 917 } 922 918 923 919 /** ··· 1231 1235 { 1232 1236 struct dwc2_hsotg *hsotg = hs_ep->parent; 1233 1237 u32 target_frame = hs_ep->target_frame; 1234 - u32 current_frame = dwc2_hsotg_read_frameno(hsotg); 1238 + u32 current_frame = hsotg->frame_number; 1235 1239 bool frame_overrun = hs_ep->frame_overrun; 1236 1240 1237 1241 if (!frame_overrun && current_frame >= target_frame) ··· 1287 1291 struct dwc2_hsotg *hs = hs_ep->parent; 1288 1292 bool first; 1289 1293 int ret; 1294 + u32 maxsize = 0; 1295 + u32 mask = 0; 1296 + 1290 1297 1291 1298 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n", 1292 1299 ep->name, req, req->length, req->buf, req->no_interrupt, ··· 1306 1307 INIT_LIST_HEAD(&hs_req->queue); 1307 1308 req->actual = 0; 1308 1309 req->status = -EINPROGRESS; 1310 + 1311 + /* In DDMA mode for ISOC's don't queue request if length greater 1312 + * than descriptor limits. 
1313 + */ 1314 + if (using_desc_dma(hs) && hs_ep->isochronous) { 1315 + maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); 1316 + if (hs_ep->dir_in && req->length > maxsize) { 1317 + dev_err(hs->dev, "wrong length %d (maxsize=%d)\n", 1318 + req->length, maxsize); 1319 + return -EINVAL; 1320 + } 1321 + 1322 + if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) { 1323 + dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n", 1324 + req->length, hs_ep->ep.maxpacket); 1325 + return -EINVAL; 1326 + } 1327 + } 1309 1328 1310 1329 ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req); 1311 1330 if (ret) ··· 1347 1330 1348 1331 /* 1349 1332 * Handle DDMA isochronous transfers separately - just add new entry 1350 - * to the half of descriptor chain that is not processed by HW. 1333 + * to the descriptor chain. 1351 1334 * Transfer will be started once SW gets either one of NAK or 1352 1335 * OutTknEpDis interrupts. 1353 1336 */ 1354 - if (using_desc_dma(hs) && hs_ep->isochronous && 1355 - hs_ep->target_frame != TARGET_FRAME_INITIAL) { 1356 - ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma, 1357 - hs_req->req.length); 1358 - if (ret) 1359 - dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__); 1360 - 1337 + if (using_desc_dma(hs) && hs_ep->isochronous) { 1338 + if (hs_ep->target_frame != TARGET_FRAME_INITIAL) { 1339 + dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma, 1340 + hs_req->req.length); 1341 + } 1361 1342 return 0; 1362 1343 } 1363 1344 ··· 1365 1350 return 0; 1366 1351 } 1367 1352 1368 - while (dwc2_gadget_target_frame_elapsed(hs_ep)) 1353 + /* Update current frame number value. */ 1354 + hs->frame_number = dwc2_hsotg_read_frameno(hs); 1355 + while (dwc2_gadget_target_frame_elapsed(hs_ep)) { 1369 1356 dwc2_gadget_incr_frame_num(hs_ep); 1357 + /* Update current frame number value once more as it 1358 + * changes here. 
1359 + */ 1360 + hs->frame_number = dwc2_hsotg_read_frameno(hs); 1361 + } 1370 1362 1371 1363 if (hs_ep->target_frame != TARGET_FRAME_INITIAL) 1372 1364 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false); ··· 2033 2011 * @hs_ep: The endpoint the request was on. 2034 2012 * 2035 2013 * Get first request from the ep queue, determine descriptor on which complete 2036 - * happened. SW based on isoc_chain_num discovers which half of the descriptor 2037 - * chain is currently in use by HW, adjusts dma_address and calculates index 2038 - * of completed descriptor based on the value of DEPDMA register. Update actual 2039 - * length of request, giveback to gadget. 2014 + * happened. SW discovers which descriptor currently in use by HW, adjusts 2015 + * dma_address and calculates index of completed descriptor based on the value 2016 + * of DEPDMA register. Update actual length of request, giveback to gadget. 2040 2017 */ 2041 2018 static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) 2042 2019 { 2043 2020 struct dwc2_hsotg *hsotg = hs_ep->parent; 2044 2021 struct dwc2_hsotg_req *hs_req; 2045 2022 struct usb_request *ureq; 2046 - int index; 2047 - dma_addr_t dma_addr; 2048 - u32 dma_reg; 2049 - u32 depdma; 2050 2023 u32 desc_sts; 2051 2024 u32 mask; 2052 2025 2053 - hs_req = get_ep_head(hs_ep); 2054 - if (!hs_req) { 2055 - dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__); 2056 - return; 2026 + desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; 2027 + 2028 + /* Process only descriptors with buffer status set to DMA done */ 2029 + while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >> 2030 + DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) { 2031 + 2032 + hs_req = get_ep_head(hs_ep); 2033 + if (!hs_req) { 2034 + dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__); 2035 + return; 2036 + } 2037 + ureq = &hs_req->req; 2038 + 2039 + /* Check completion status */ 2040 + if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT == 2041 + 
DEV_DMA_STS_SUCC) { 2042 + mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK : 2043 + DEV_DMA_ISOC_RX_NBYTES_MASK; 2044 + ureq->actual = ureq->length - ((desc_sts & mask) >> 2045 + DEV_DMA_ISOC_NBYTES_SHIFT); 2046 + 2047 + /* Adjust actual len for ISOC Out if len is 2048 + * not align of 4 2049 + */ 2050 + if (!hs_ep->dir_in && ureq->length & 0x3) 2051 + ureq->actual += 4 - (ureq->length & 0x3); 2052 + } 2053 + 2054 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2055 + 2056 + hs_ep->compl_desc++; 2057 + if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1)) 2058 + hs_ep->compl_desc = 0; 2059 + desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; 2057 2060 } 2058 - ureq = &hs_req->req; 2059 - 2060 - dma_addr = hs_ep->desc_list_dma; 2061 - 2062 - /* 2063 - * If lower half of descriptor chain is currently use by SW, 2064 - * that means higher half is being processed by HW, so shift 2065 - * DMA address to higher half of descriptor chain. 2066 - */ 2067 - if (!hs_ep->isoc_chain_num) 2068 - dma_addr += sizeof(struct dwc2_dma_desc) * 2069 - (MAX_DMA_DESC_NUM_GENERIC / 2); 2070 - 2071 - dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index); 2072 - depdma = dwc2_readl(hsotg->regs + dma_reg); 2073 - 2074 - index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1; 2075 - desc_sts = hs_ep->desc_list[index].status; 2076 - 2077 - mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK : 2078 - DEV_DMA_ISOC_RX_NBYTES_MASK; 2079 - ureq->actual = ureq->length - 2080 - ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT); 2081 - 2082 - /* Adjust actual length for ISOC Out if length is not align of 4 */ 2083 - if (!hs_ep->dir_in && ureq->length & 0x3) 2084 - ureq->actual += 4 - (ureq->length & 0x3); 2085 - 2086 - dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2087 2061 } 2088 2062 2089 2063 /* 2090 - * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any. 2091 - * @hs_ep: The isochronous endpoint to be re-enabled. 
2064 + * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC. 2065 + * @hs_ep: The isochronous endpoint. 2092 2066 * 2093 - * If ep has been disabled due to last descriptor servicing (IN endpoint) or 2094 - * BNA (OUT endpoint) check the status of other half of descriptor chain that 2095 - * was under SW control till HW was busy and restart the endpoint if needed. 2067 + * If EP ISOC OUT then need to flush RX FIFO to remove source of BNA 2068 + * interrupt. Reset target frame and next_desc to allow to start 2069 + * ISOC's on NAK interrupt for IN direction or on OUTTKNEPDIS 2070 + * interrupt for OUT direction. 2096 2071 */ 2097 - static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) 2072 + static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep) 2098 2073 { 2099 2074 struct dwc2_hsotg *hsotg = hs_ep->parent; 2100 - u32 depctl; 2101 - u32 dma_reg; 2102 - u32 ctrl; 2103 - u32 dma_addr = hs_ep->desc_list_dma; 2104 - unsigned char index = hs_ep->index; 2105 2075 2106 - dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index); 2107 - depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); 2076 + if (!hs_ep->dir_in) 2077 + dwc2_flush_rx_fifo(hsotg); 2078 + dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0); 2108 2079 2109 - ctrl = dwc2_readl(hsotg->regs + depctl); 2110 - 2111 - /* 2112 - * EP was disabled if HW has processed last descriptor or BNA was set. 2113 - * So restart ep if SW has prepared new descriptor chain in ep_queue 2114 - * routine while HW was busy. 
2115 - */ 2116 - if (!(ctrl & DXEPCTL_EPENA)) { 2117 - if (!hs_ep->next_desc) { 2118 - dev_dbg(hsotg->dev, "%s: No more ISOC requests\n", 2119 - __func__); 2120 - return; 2121 - } 2122 - 2123 - dma_addr += sizeof(struct dwc2_dma_desc) * 2124 - (MAX_DMA_DESC_NUM_GENERIC / 2) * 2125 - hs_ep->isoc_chain_num; 2126 - dwc2_writel(dma_addr, hsotg->regs + dma_reg); 2127 - 2128 - ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK; 2129 - dwc2_writel(ctrl, hsotg->regs + depctl); 2130 - 2131 - /* Switch ISOC descriptor chain number being processed by SW*/ 2132 - hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1; 2133 - hs_ep->next_desc = 0; 2134 - 2135 - dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n", 2136 - __func__); 2137 - } 2080 + hs_ep->target_frame = TARGET_FRAME_INITIAL; 2081 + hs_ep->next_desc = 0; 2082 + hs_ep->compl_desc = 0; 2138 2083 } 2139 2084 2140 2085 /** ··· 2430 2441 * @ep: The index number of the endpoint 2431 2442 * @mps: The maximum packet size in bytes 2432 2443 * @mc: The multicount value 2444 + * @dir_in: True if direction is in. 2433 2445 * 2434 2446 * Configure the maximum packet size for the given endpoint, updating 2435 2447 * the hardware control registers to reflect this. ··· 2721 2731 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 2722 2732 -ENODATA); 2723 2733 dwc2_gadget_incr_frame_num(hs_ep); 2734 + /* Update current frame number value. */ 2735 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); 2724 2736 } while (dwc2_gadget_target_frame_elapsed(hs_ep)); 2725 2737 2726 2738 dwc2_gadget_start_next_request(hs_ep); ··· 2730 2738 2731 2739 /** 2732 2740 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS 2733 - * @hs_ep: The endpoint on which interrupt is asserted. 2741 + * @ep: The endpoint on which interrupt is asserted. 2734 2742 * 2735 2743 * This is starting point for ISOC-OUT transfer, synchronization done with 2736 2744 * first out token received from host while corresponding EP is disabled. 
··· 2755 2763 */ 2756 2764 tmp = dwc2_hsotg_read_frameno(hsotg); 2757 2765 2758 - dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA); 2766 + dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0); 2759 2767 2760 2768 if (using_desc_dma(hsotg)) { 2761 2769 if (ep->target_frame == TARGET_FRAME_INITIAL) { ··· 2808 2816 { 2809 2817 struct dwc2_hsotg *hsotg = hs_ep->parent; 2810 2818 int dir_in = hs_ep->dir_in; 2819 + u32 tmp; 2811 2820 2812 2821 if (!dir_in || !hs_ep->isochronous) 2813 2822 return; 2814 2823 2815 2824 if (hs_ep->target_frame == TARGET_FRAME_INITIAL) { 2816 - hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg); 2817 2825 2826 + tmp = dwc2_hsotg_read_frameno(hsotg); 2818 2827 if (using_desc_dma(hsotg)) { 2828 + dwc2_hsotg_complete_request(hsotg, hs_ep, 2829 + get_ep_head(hs_ep), 0); 2830 + 2831 + hs_ep->target_frame = tmp; 2832 + dwc2_gadget_incr_frame_num(hs_ep); 2819 2833 dwc2_gadget_start_isoc_ddma(hs_ep); 2820 2834 return; 2821 2835 } 2822 2836 2837 + hs_ep->target_frame = tmp; 2823 2838 if (hs_ep->interval > 1) { 2824 2839 u32 ctrl = dwc2_readl(hsotg->regs + 2825 2840 DIEPCTL(hs_ep->index)); ··· 2842 2843 get_ep_head(hs_ep), 0); 2843 2844 } 2844 2845 2845 - dwc2_gadget_incr_frame_num(hs_ep); 2846 + if (!using_desc_dma(hsotg)) 2847 + dwc2_gadget_incr_frame_num(hs_ep); 2846 2848 } 2847 2849 2848 2850 /** ··· 2901 2901 2902 2902 /* In DDMA handle isochronous requests separately */ 2903 2903 if (using_desc_dma(hsotg) && hs_ep->isochronous) { 2904 - dwc2_gadget_complete_isoc_request_ddma(hs_ep); 2905 - /* Try to start next isoc request */ 2906 - dwc2_gadget_start_next_isoc_ddma(hs_ep); 2904 + /* XferCompl set along with BNA */ 2905 + if (!(ints & DXEPINT_BNAINTR)) 2906 + dwc2_gadget_complete_isoc_request_ddma(hs_ep); 2907 2907 } else if (dir_in) { 2908 2908 /* 2909 2909 * We get OutDone from the FIFO, so we only ··· 2978 2978 2979 2979 if (ints & DXEPINT_BNAINTR) { 2980 2980 dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__); 2981 - 
2982 - /* 2983 - * Try to start next isoc request, if any. 2984 - * Sometimes the endpoint remains enabled after BNA interrupt 2985 - * assertion, which is not expected, hence we can enter here 2986 - * couple of times. 2987 - */ 2988 2981 if (hs_ep->isochronous) 2989 - dwc2_gadget_start_next_isoc_ddma(hs_ep); 2982 + dwc2_gadget_handle_isoc_bna(hs_ep); 2990 2983 } 2991 2984 2992 2985 if (dir_in && !hs_ep->isochronous) { ··· 3190 3197 /** 3191 3198 * dwc2_hsotg_core_init - issue softreset to the core 3192 3199 * @hsotg: The device state 3200 + * @is_usb_reset: Usb resetting flag 3193 3201 * 3194 3202 * Issue a soft reset to the core, and await the core finishing it. 3195 3203 */ ··· 3253 3259 dcfg |= DCFG_DEVSPD_HS; 3254 3260 } 3255 3261 3262 + if (hsotg->params.ipg_isoc_en) 3263 + dcfg |= DCFG_IPG_ISOC_SUPPORDED; 3264 + 3256 3265 dwc2_writel(dcfg, hsotg->regs + DCFG); 3257 3266 3258 3267 /* Clear any pending OTG interrupts */ ··· 3317 3320 hsotg->regs + DOEPMSK); 3318 3321 3319 3322 /* Enable BNA interrupt for DDMA */ 3320 - if (using_desc_dma(hsotg)) 3323 + if (using_desc_dma(hsotg)) { 3321 3324 dwc2_set_bit(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK); 3325 + dwc2_set_bit(hsotg->regs + DIEPMSK, DIEPMSK_BNAININTRMSK); 3326 + } 3322 3327 3323 3328 dwc2_writel(0, hsotg->regs + DAINTMSK); 3324 3329 ··· 3426 3427 3427 3428 daintmsk = dwc2_readl(hsotg->regs + DAINTMSK); 3428 3429 3429 - for (idx = 1; idx <= hsotg->num_of_eps; idx++) { 3430 + for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3430 3431 hs_ep = hsotg->eps_in[idx]; 3431 3432 /* Proceed only unmasked ISOC EPs */ 3432 3433 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) ··· 3472 3473 daintmsk = dwc2_readl(hsotg->regs + DAINTMSK); 3473 3474 daintmsk >>= DAINT_OUTEP_SHIFT; 3474 3475 3475 - for (idx = 1; idx <= hsotg->num_of_eps; idx++) { 3476 + for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3476 3477 hs_ep = hsotg->eps_out[idx]; 3477 3478 /* Proceed only unmasked ISOC EPs */ 3478 3479 if (!hs_ep->isochronous || 
(BIT(idx) & ~daintmsk)) ··· 3646 3647 dwc2_writel(gintmsk, hsotg->regs + GINTMSK); 3647 3648 3648 3649 dev_dbg(hsotg->dev, "GOUTNakEff triggered\n"); 3649 - for (idx = 1; idx <= hsotg->num_of_eps; idx++) { 3650 + for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3650 3651 hs_ep = hsotg->eps_out[idx]; 3651 3652 /* Proceed only unmasked ISOC EPs */ 3652 3653 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) ··· 3788 3789 unsigned int dir_in; 3789 3790 unsigned int i, val, size; 3790 3791 int ret = 0; 3792 + unsigned char ep_type; 3791 3793 3792 3794 dev_dbg(hsotg->dev, 3793 3795 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n", ··· 3807 3807 return -EINVAL; 3808 3808 } 3809 3809 3810 + ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 3810 3811 mps = usb_endpoint_maxp(desc); 3811 3812 mc = usb_endpoint_maxp_mult(desc); 3813 + 3814 + /* ISOC IN in DDMA supported bInterval up to 10 */ 3815 + if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3816 + dir_in && desc->bInterval > 10) { 3817 + dev_err(hsotg->dev, 3818 + "%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__); 3819 + return -EINVAL; 3820 + } 3821 + 3822 + /* High bandwidth ISOC OUT in DDMA not supported */ 3823 + if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3824 + !dir_in && mc > 1) { 3825 + dev_err(hsotg->dev, 3826 + "%s: ISOC OUT, DDMA: HB not supported!\n", __func__); 3827 + return -EINVAL; 3828 + } 3812 3829 3813 3830 /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */ 3814 3831 ··· 3867 3850 hs_ep->halted = 0; 3868 3851 hs_ep->interval = desc->bInterval; 3869 3852 3870 - switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 3853 + switch (ep_type) { 3871 3854 case USB_ENDPOINT_XFER_ISOC: 3872 3855 epctrl |= DXEPCTL_EPTYPE_ISO; 3873 3856 epctrl |= DXEPCTL_SETEVENFR; 3874 3857 hs_ep->isochronous = 1; 3875 3858 hs_ep->interval = 1 << (desc->bInterval - 1); 3876 3859 hs_ep->target_frame = TARGET_FRAME_INITIAL; 3877 - 
hs_ep->isoc_chain_num = 0; 3878 3860 hs_ep->next_desc = 0; 3861 + hs_ep->compl_desc = 0; 3879 3862 if (dir_in) { 3880 3863 hs_ep->periodic = 1; 3881 3864 mask = dwc2_readl(hsotg->regs + DIEPMSK); ··· 4318 4301 /** 4319 4302 * dwc2_hsotg_udc_stop - stop the udc 4320 4303 * @gadget: The usb gadget state 4321 - * @driver: The usb gadget driver 4322 4304 * 4323 4305 * Stop udc hw block and stay tunned for future transmissions 4324 4306 */ ··· 4469 4453 * @hsotg: The device state. 4470 4454 * @hs_ep: The endpoint to be initialised. 4471 4455 * @epnum: The endpoint number 4456 + * @dir_in: True if direction is in. 4472 4457 * 4473 4458 * Initialise the given endpoint (as part of the probe and device state 4474 4459 * creation) to give to the gadget driver. Setup the endpoint name, any ··· 4543 4526 4544 4527 /** 4545 4528 * dwc2_hsotg_hw_cfg - read HW configuration registers 4546 - * @param: The device state 4529 + * @hsotg: Programming view of the DWC_otg controller 4547 4530 * 4548 4531 * Read the USB core HW configuration registers 4549 4532 */ ··· 4599 4582 4600 4583 /** 4601 4584 * dwc2_hsotg_dump - dump state of the udc 4602 - * @param: The device state 4585 + * @hsotg: Programming view of the DWC_otg controller 4586 + * 4603 4587 */ 4604 4588 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) 4605 4589 { ··· 4651 4633 4652 4634 /** 4653 4635 * dwc2_gadget_init - init function for gadget 4654 - * @dwc2: The data structure for the DWC2 driver. 
4636 + * @hsotg: Programming view of the DWC_otg controller 4637 + * 4655 4638 */ 4656 4639 int dwc2_gadget_init(struct dwc2_hsotg *hsotg) 4657 4640 { ··· 4749 4730 4750 4731 /** 4751 4732 * dwc2_hsotg_remove - remove function for hsotg driver 4752 - * @pdev: The platform information for the driver 4733 + * @hsotg: Programming view of the DWC_otg controller 4734 + * 4753 4735 */ 4754 4736 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg) 4755 4737 { ··· 5031 5011 * 5032 5012 * @hsotg: Programming view of the DWC_otg controller 5033 5013 * @rem_wakeup: indicates whether resume is initiated by Device or Host. 5034 - * @param reset: indicates whether resume is initiated by Reset. 5014 + * @reset: indicates whether resume is initiated by Reset. 5035 5015 * 5036 5016 * Return non-zero if failed to exit from hibernation. 5037 5017 */
+1 -2
drivers/usb/dwc2/hcd.c
··· 597 597 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination 598 598 * buffer 599 599 * 600 - * @core_if: Programming view of DWC_otg controller 600 + * @hsotg: Programming view of DWC_otg controller 601 601 * @dest: Destination buffer for the packet 602 602 * @bytes: Number of bytes to copy to the destination 603 603 */ ··· 4087 4087 * then the refcount for the structure will go to 0 and we'll free it. 4088 4088 * 4089 4089 * @hsotg: The HCD state structure for the DWC OTG controller. 4090 - * @qh: The QH structure. 4091 4090 * @context: The priv pointer from a struct dwc2_hcd_urb. 4092 4091 * @mem_flags: Flags for allocating memory. 4093 4092 * @ttport: We'll return this device's port number here. That's used to
+9 -5
drivers/usb/dwc2/hcd.h
··· 80 80 * @xfer_count: Number of bytes transferred so far 81 81 * @start_pkt_count: Packet count at start of transfer 82 82 * @xfer_started: True if the transfer has been started 83 - * @ping: True if a PING request should be issued on this channel 83 + * @do_ping: True if a PING request should be issued on this channel 84 84 * @error_state: True if the error count for this transaction is non-zero 85 85 * @halt_on_queue: True if this channel should be halted the next time a 86 86 * request is queued for the channel. This is necessary in ··· 102 102 * @schinfo: Scheduling micro-frame bitmap 103 103 * @ntd: Number of transfer descriptors for the transfer 104 104 * @halt_status: Reason for halting the host channel 105 - * @hcint Contents of the HCINT register when the interrupt came 105 + * @hcint: Contents of the HCINT register when the interrupt came 106 106 * @qh: QH for the transfer being processed by this channel 107 107 * @hc_list_entry: For linking to list of host channels 108 108 * @desc_list_addr: Current QH's descriptor list DMA address ··· 237 237 /** 238 238 * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus. 239 239 * 240 - * @start_schedule_usecs: The start time on the main bus schedule. Note that 240 + * @start_schedule_us: The start time on the main bus schedule. Note that 241 241 * the main bus schedule is tightly packed and this 242 242 * time should be interpreted as tightly packed (so 243 243 * uFrame 0 starts at 0 us, uFrame 1 starts at 100 us ··· 301 301 * "struct dwc2_tt". Not used if this device is high 302 302 * speed. Note that this is in "schedule slice" which 303 303 * is tightly packed. 304 - * @ls_duration_us: Duration on the low speed bus schedule. 
305 304 * @ntd: Actual number of transfer descriptors in a list 306 305 * @qtd_list: List of QTDs for this QH 307 306 * @channel: Host channel currently processing transfers for this QH ··· 314 315 * descriptor 315 316 * @unreserve_timer: Timer for releasing periodic reservation. 316 317 * @wait_timer: Timer used to wait before re-queuing. 317 - * @dwc2_tt: Pointer to our tt info (or NULL if no tt). 318 + * @dwc_tt: Pointer to our tt info (or NULL if no tt). 318 319 * @ttport: Port number within our tt. 319 320 * @tt_buffer_dirty True if clear_tt_buffer_complete is pending 320 321 * @unreserve_pending: True if we planned to unreserve but haven't yet. ··· 324 325 * periodic transfers and is ignored for periodic ones. 325 326 * @wait_timer_cancel: Set to true to cancel the wait_timer. 326 327 * 328 + * @tt_buffer_dirty: True if EP's TT buffer is not clean. 327 329 * A Queue Head (QH) holds the static characteristics of an endpoint and 328 330 * maintains a list of transfers (QTDs) for that endpoint. A QH structure may 329 331 * be entered in either the non-periodic or periodic schedule. ··· 400 400 * @urb: URB for this transfer 401 401 * @qh: Queue head for this QTD 402 402 * @qtd_list_entry: For linking to the QH's list of QTDs 403 + * @isoc_td_first: Index of first activated isochronous transfer 404 + * descriptor in Descriptor DMA mode 405 + * @isoc_td_last: Index of last activated isochronous transfer 406 + * descriptor in Descriptor DMA mode 403 407 * 404 408 * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control, 405 409 * interrupt, or isochronous transfer. A single QTD is created for each URB
+1
drivers/usb/dwc2/hcd_ddma.c
··· 332 332 * 333 333 * @hsotg: The HCD state structure for the DWC OTG controller 334 334 * @qh: The QH to init 335 + * @mem_flags: Indicates the type of memory allocation 335 336 * 336 337 * Return: 0 if successful, negative error code otherwise 337 338 *
+12
drivers/usb/dwc2/hcd_intr.c
··· 478 478 * of the URB based on the number of bytes transferred via the host channel. 479 479 * Sets the URB status if the data transfer is finished. 480 480 * 481 + * @hsotg: Programming view of the DWC_otg controller 482 + * @chan: Programming view of host channel 483 + * @chnum: Channel number 484 + * @urb: Processing URB 485 + * @qtd: Queue transfer descriptor 486 + * 481 487 * Return: 1 if the data transfer specified by the URB is completely finished, 482 488 * 0 otherwise 483 489 */ ··· 571 565 * the frame descriptor array are set based on the transfer state and the input 572 566 * halt_status. Completes the Isochronous URB if all the URB frames have been 573 567 * completed. 568 + * 569 + * @hsotg: Programming view of the DWC_otg controller 570 + * @chan: Programming view of host channel 571 + * @chnum: Channel number 572 + * @halt_status: Reason for halting a host channel 573 + * @qtd: Queue transfer descriptor 574 574 * 575 575 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be 576 576 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
+3 -2
drivers/usb/dwc2/hcd_queue.c
··· 679 679 * 680 680 * @hsotg: The HCD state structure for the DWC OTG controller. 681 681 * @qh: QH for the periodic transfer. 682 + * @index: Transfer index 682 683 */ 683 684 static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg, 684 685 struct dwc2_qh *qh, int index) ··· 1277 1276 * release the reservation. This worker is called after the appropriate 1278 1277 * delay. 1279 1278 * 1280 - * @work: Pointer to a qh unreserve_work. 1279 + * @t: Address to a qh unreserve_work. 1281 1280 */ 1282 1281 static void dwc2_unreserve_timer_fn(struct timer_list *t) 1283 1282 { ··· 1632 1631 * @hsotg: The HCD state structure for the DWC OTG controller 1633 1632 * @urb: Holds the information about the device/endpoint needed 1634 1633 * to initialize the QH 1635 - * @atomic_alloc: Flag to do atomic allocation if needed 1634 + * @mem_flags: Flags for allocating memory. 1636 1635 * 1637 1636 * Return: Pointer to the newly allocated QH, or NULL on error 1638 1637 */
+2
drivers/usb/dwc2/hw.h
··· 311 311 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14) 312 312 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14 313 313 #define GHWCFG4_ACG_SUPPORTED BIT(12) 314 + #define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11) 314 315 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0 315 316 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1 316 317 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2 ··· 425 424 #define DCFG_EPMISCNT_SHIFT 18 426 425 #define DCFG_EPMISCNT_LIMIT 0x1f 427 426 #define DCFG_EPMISCNT(_x) ((_x) << 18) 427 + #define DCFG_IPG_ISOC_SUPPORDED BIT(17) 428 428 #define DCFG_PERFRINT_MASK (0x3 << 11) 429 429 #define DCFG_PERFRINT_SHIFT 11 430 430 #define DCFG_PERFRINT_LIMIT 0x3
+13 -1
drivers/usb/dwc2/params.c
··· 70 70 GAHBCFG_HBSTLEN_SHIFT; 71 71 p->uframe_sched = false; 72 72 p->change_speed_quirk = true; 73 + p->power_down = false; 73 74 } 74 75 75 76 static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg) ··· 270 269 /** 271 270 * dwc2_set_default_params() - Set all core parameters to their 272 271 * auto-detected default values. 272 + * 273 + * @hsotg: Programming view of the DWC_otg controller 274 + * 273 275 */ 274 276 static void dwc2_set_default_params(struct dwc2_hsotg *hsotg) 275 277 { ··· 302 298 p->besl = true; 303 299 p->hird_threshold_en = true; 304 300 p->hird_threshold = 4; 301 + p->ipg_isoc_en = false; 305 302 p->max_packet_count = hw->max_packet_count; 306 303 p->max_transfer_size = hw->max_transfer_size; 307 304 p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT; ··· 342 337 343 338 /** 344 339 * dwc2_get_device_properties() - Read in device properties. 340 + * 341 + * @hsotg: Programming view of the DWC_otg controller 345 342 * 346 343 * Read in the device properties and adjust core parameters if needed. 347 344 */ ··· 556 549 } 557 550 558 551 #define CHECK_RANGE(_param, _min, _max, _def) do { \ 559 - if ((hsotg->params._param) < (_min) || \ 552 + if ((int)(hsotg->params._param) < (_min) || \ 560 553 (hsotg->params._param) > (_max)) { \ 561 554 dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \ 562 555 __func__, #_param, hsotg->params._param); \ ··· 586 579 CHECK_BOOL(enable_dynamic_fifo, hw->enable_dynamic_fifo); 587 580 CHECK_BOOL(en_multiple_tx_fifo, hw->en_multiple_tx_fifo); 588 581 CHECK_BOOL(i2c_enable, hw->i2c_enable); 582 + CHECK_BOOL(ipg_isoc_en, hw->ipg_isoc_en); 589 583 CHECK_BOOL(acg_enable, hw->acg_enable); 590 584 CHECK_BOOL(reload_ctl, (hsotg->hw_params.snpsid > DWC2_CORE_REV_2_92a)); 591 585 CHECK_BOOL(lpm, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_80a)); ··· 696 688 /** 697 689 * During device initialization, read various hardware configuration 698 690 * registers and interpret the contents. 
691 + * 692 + * @hsotg: Programming view of the DWC_otg controller 693 + * 699 694 */ 700 695 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) 701 696 { ··· 783 772 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> 784 773 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; 785 774 hw->acg_enable = !!(hwcfg4 & GHWCFG4_ACG_SUPPORTED); 775 + hw->ipg_isoc_en = !!(hwcfg4 & GHWCFG4_IPG_ISOC_SUPPORTED); 786 776 787 777 /* fifo sizes */ 788 778 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+6
drivers/usb/dwc2/pci.c
··· 77 77 return 0; 78 78 } 79 79 80 + /** 81 + * dwc2_pci_probe() - Provides the cleanup entry points for the DWC_otg PCI 82 + * driver 83 + * 84 + * @pci: The programming view of DWC_otg PCI 85 + */ 80 86 static void dwc2_pci_remove(struct pci_dev *pci) 81 87 { 82 88 struct dwc2_pci_glue *glue = pci_get_drvdata(pci);
+12
drivers/usb/dwc3/Kconfig
··· 106 106 inside (i.e. STiH407). 107 107 Say 'Y' or 'M' if you have one such device. 108 108 109 + config USB_DWC3_QCOM 110 + tristate "Qualcomm Platform" 111 + depends on ARCH_QCOM || COMPILE_TEST 112 + depends on OF 113 + default USB_DWC3 114 + help 115 + Some Qualcomm SoCs use DesignWare Core IP for USB2/3 116 + functionality. 117 + This driver also handles Qscratch wrapper which is needed 118 + for peripheral mode support. 119 + Say 'Y' or 'M' if you have one such device. 120 + 109 121 endif
+1
drivers/usb/dwc3/Makefile
··· 48 48 obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o 49 49 obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o 50 50 obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o 51 + obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
+126 -28
drivers/usb/dwc3/core.c
··· 8 8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 9 9 */ 10 10 11 + #include <linux/clk.h> 11 12 #include <linux/version.h> 12 13 #include <linux/module.h> 13 14 #include <linux/kernel.h> ··· 25 24 #include <linux/of.h> 26 25 #include <linux/acpi.h> 27 26 #include <linux/pinctrl/consumer.h> 27 + #include <linux/reset.h> 28 28 29 29 #include <linux/usb/ch9.h> 30 30 #include <linux/usb/gadget.h> ··· 267 265 268 266 return 0; 269 267 } 268 + 269 + static const struct clk_bulk_data dwc3_core_clks[] = { 270 + { .id = "ref" }, 271 + { .id = "bus_early" }, 272 + { .id = "suspend" }, 273 + }; 270 274 271 275 /* 272 276 * dwc3_frame_length_adjustment - Adjusts frame length if required ··· 675 667 usb_phy_set_suspend(dwc->usb3_phy, 1); 676 668 phy_power_off(dwc->usb2_generic_phy); 677 669 phy_power_off(dwc->usb3_generic_phy); 670 + clk_bulk_disable(dwc->num_clks, dwc->clks); 671 + clk_bulk_unprepare(dwc->num_clks, dwc->clks); 672 + reset_control_assert(dwc->reset); 678 673 } 679 674 680 675 static bool dwc3_core_is_valid(struct dwc3 *dwc) ··· 1256 1245 static int dwc3_probe(struct platform_device *pdev) 1257 1246 { 1258 1247 struct device *dev = &pdev->dev; 1259 - struct resource *res; 1248 + struct resource *res, dwc_res; 1260 1249 struct dwc3 *dwc; 1261 1250 1262 1251 int ret; ··· 1267 1256 if (!dwc) 1268 1257 return -ENOMEM; 1269 1258 1259 + dwc->clks = devm_kmemdup(dev, dwc3_core_clks, sizeof(dwc3_core_clks), 1260 + GFP_KERNEL); 1261 + if (!dwc->clks) 1262 + return -ENOMEM; 1263 + 1264 + dwc->num_clks = ARRAY_SIZE(dwc3_core_clks); 1270 1265 dwc->dev = dev; 1271 1266 1272 1267 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 1287 1270 dwc->xhci_resources[0].flags = res->flags; 1288 1271 dwc->xhci_resources[0].name = res->name; 1289 1272 1290 - res->start += DWC3_GLOBALS_REGS_START; 1291 - 1292 1273 /* 1293 1274 * Request memory region but exclude xHCI regs, 1294 1275 * since it will be requested by the xhci-plat driver. 
1295 1276 */ 1296 - regs = devm_ioremap_resource(dev, res); 1297 - if (IS_ERR(regs)) { 1298 - ret = PTR_ERR(regs); 1299 - goto err0; 1300 - } 1277 + dwc_res = *res; 1278 + dwc_res.start += DWC3_GLOBALS_REGS_START; 1279 + 1280 + regs = devm_ioremap_resource(dev, &dwc_res); 1281 + if (IS_ERR(regs)) 1282 + return PTR_ERR(regs); 1301 1283 1302 1284 dwc->regs = regs; 1303 - dwc->regs_size = resource_size(res); 1285 + dwc->regs_size = resource_size(&dwc_res); 1304 1286 1305 1287 dwc3_get_properties(dwc); 1288 + 1289 + dwc->reset = devm_reset_control_get_optional_shared(dev, NULL); 1290 + if (IS_ERR(dwc->reset)) 1291 + return PTR_ERR(dwc->reset); 1292 + 1293 + ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks); 1294 + if (ret == -EPROBE_DEFER) 1295 + return ret; 1296 + /* 1297 + * Clocks are optional, but new DT platforms should support all clocks 1298 + * as required by the DT-binding. 1299 + */ 1300 + if (ret) 1301 + dwc->num_clks = 0; 1302 + 1303 + ret = reset_control_deassert(dwc->reset); 1304 + if (ret) 1305 + goto put_clks; 1306 + 1307 + ret = clk_bulk_prepare(dwc->num_clks, dwc->clks); 1308 + if (ret) 1309 + goto assert_reset; 1310 + 1311 + ret = clk_bulk_enable(dwc->num_clks, dwc->clks); 1312 + if (ret) 1313 + goto unprepare_clks; 1306 1314 1307 1315 platform_set_drvdata(pdev, dwc); 1308 1316 dwc3_cache_hwparams(dwc); ··· 1392 1350 pm_runtime_put_sync(&pdev->dev); 1393 1351 pm_runtime_disable(&pdev->dev); 1394 1352 1395 - err0: 1396 - /* 1397 - * restore res->start back to its original value so that, in case the 1398 - * probe is deferred, we don't end up getting error in request the 1399 - * memory region the next time probe is called. 
1400 - */ 1401 - res->start -= DWC3_GLOBALS_REGS_START; 1353 + clk_bulk_disable(dwc->num_clks, dwc->clks); 1354 + unprepare_clks: 1355 + clk_bulk_unprepare(dwc->num_clks, dwc->clks); 1356 + assert_reset: 1357 + reset_control_assert(dwc->reset); 1358 + put_clks: 1359 + clk_bulk_put(dwc->num_clks, dwc->clks); 1402 1360 1403 1361 return ret; 1404 1362 } ··· 1406 1364 static int dwc3_remove(struct platform_device *pdev) 1407 1365 { 1408 1366 struct dwc3 *dwc = platform_get_drvdata(pdev); 1409 - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1410 1367 1411 1368 pm_runtime_get_sync(&pdev->dev); 1412 - /* 1413 - * restore res->start back to its original value so that, in case the 1414 - * probe is deferred, we don't end up getting error in request the 1415 - * memory region the next time probe is called. 1416 - */ 1417 - res->start -= DWC3_GLOBALS_REGS_START; 1418 1369 1419 1370 dwc3_debugfs_exit(dwc); 1420 1371 dwc3_core_exit_mode(dwc); ··· 1421 1386 1422 1387 dwc3_free_event_buffers(dwc); 1423 1388 dwc3_free_scratch_buffers(dwc); 1389 + clk_bulk_put(dwc->num_clks, dwc->clks); 1424 1390 1425 1391 return 0; 1426 1392 } 1427 1393 1428 1394 #ifdef CONFIG_PM 1395 + static int dwc3_core_init_for_resume(struct dwc3 *dwc) 1396 + { 1397 + int ret; 1398 + 1399 + ret = reset_control_deassert(dwc->reset); 1400 + if (ret) 1401 + return ret; 1402 + 1403 + ret = clk_bulk_prepare(dwc->num_clks, dwc->clks); 1404 + if (ret) 1405 + goto assert_reset; 1406 + 1407 + ret = clk_bulk_enable(dwc->num_clks, dwc->clks); 1408 + if (ret) 1409 + goto unprepare_clks; 1410 + 1411 + ret = dwc3_core_init(dwc); 1412 + if (ret) 1413 + goto disable_clks; 1414 + 1415 + return 0; 1416 + 1417 + disable_clks: 1418 + clk_bulk_disable(dwc->num_clks, dwc->clks); 1419 + unprepare_clks: 1420 + clk_bulk_unprepare(dwc->num_clks, dwc->clks); 1421 + assert_reset: 1422 + reset_control_assert(dwc->reset); 1423 + 1424 + return ret; 1425 + } 1426 + 1429 1427 static int dwc3_suspend_common(struct 
dwc3 *dwc, pm_message_t msg) 1430 1428 { 1431 1429 unsigned long flags; 1430 + u32 reg; 1432 1431 1433 1432 switch (dwc->current_dr_role) { 1434 1433 case DWC3_GCTL_PRTCAP_DEVICE: ··· 1472 1403 dwc3_core_exit(dwc); 1473 1404 break; 1474 1405 case DWC3_GCTL_PRTCAP_HOST: 1475 - /* do nothing during host runtime_suspend */ 1476 - if (!PMSG_IS_AUTO(msg)) 1406 + if (!PMSG_IS_AUTO(msg)) { 1477 1407 dwc3_core_exit(dwc); 1408 + break; 1409 + } 1410 + 1411 + /* Let controller to suspend HSPHY before PHY driver suspends */ 1412 + if (dwc->dis_u2_susphy_quirk || 1413 + dwc->dis_enblslpm_quirk) { 1414 + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 1415 + reg |= DWC3_GUSB2PHYCFG_ENBLSLPM | 1416 + DWC3_GUSB2PHYCFG_SUSPHY; 1417 + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 1418 + 1419 + /* Give some time for USB2 PHY to suspend */ 1420 + usleep_range(5000, 6000); 1421 + } 1422 + 1423 + phy_pm_runtime_put_sync(dwc->usb2_generic_phy); 1424 + phy_pm_runtime_put_sync(dwc->usb3_generic_phy); 1478 1425 break; 1479 1426 case DWC3_GCTL_PRTCAP_OTG: 1480 1427 /* do nothing during runtime_suspend */ ··· 1518 1433 { 1519 1434 unsigned long flags; 1520 1435 int ret; 1436 + u32 reg; 1521 1437 1522 1438 switch (dwc->current_dr_role) { 1523 1439 case DWC3_GCTL_PRTCAP_DEVICE: 1524 - ret = dwc3_core_init(dwc); 1440 + ret = dwc3_core_init_for_resume(dwc); 1525 1441 if (ret) 1526 1442 return ret; 1527 1443 ··· 1532 1446 spin_unlock_irqrestore(&dwc->lock, flags); 1533 1447 break; 1534 1448 case DWC3_GCTL_PRTCAP_HOST: 1535 - /* nothing to do on host runtime_resume */ 1536 1449 if (!PMSG_IS_AUTO(msg)) { 1537 - ret = dwc3_core_init(dwc); 1450 + ret = dwc3_core_init_for_resume(dwc); 1538 1451 if (ret) 1539 1452 return ret; 1540 1453 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); 1454 + break; 1541 1455 } 1456 + /* Restore GUSB2PHYCFG bits that were modified in suspend */ 1457 + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 1458 + if (dwc->dis_u2_susphy_quirk) 1459 + reg &= 
~DWC3_GUSB2PHYCFG_SUSPHY; 1460 + 1461 + if (dwc->dis_enblslpm_quirk) 1462 + reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; 1463 + 1464 + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 1465 + 1466 + phy_pm_runtime_get_sync(dwc->usb2_generic_phy); 1467 + phy_pm_runtime_get_sync(dwc->usb3_generic_phy); 1542 1468 break; 1543 1469 case DWC3_GCTL_PRTCAP_OTG: 1544 1470 /* nothing to do on runtime_resume */
+16 -9
drivers/usb/dwc3/core.h
··· 639 639 * @resource_index: Resource transfer index 640 640 * @frame_number: set to the frame number we want this transfer to start (ISOC) 641 641 * @interval: the interval on which the ISOC transfer is started 642 - * @allocated_requests: number of requests allocated 643 - * @queued_requests: number of requests queued for transfer 644 642 * @name: a human readable name e.g. ep1out-bulk 645 643 * @direction: true for TX, false for RX 646 644 * @stream_capable: true when streams are enabled ··· 662 664 #define DWC3_EP_ENABLED BIT(0) 663 665 #define DWC3_EP_STALL BIT(1) 664 666 #define DWC3_EP_WEDGE BIT(2) 665 - #define DWC3_EP_BUSY BIT(4) 667 + #define DWC3_EP_TRANSFER_STARTED BIT(3) 666 668 #define DWC3_EP_PENDING_REQUEST BIT(5) 667 - #define DWC3_EP_MISSED_ISOC BIT(6) 668 669 #define DWC3_EP_END_TRANSFER_PENDING BIT(7) 669 - #define DWC3_EP_TRANSFER_STARTED BIT(8) 670 670 671 671 /* This last one is specific to EP0 */ 672 672 #define DWC3_EP0_DIR_IN BIT(31) ··· 684 688 u8 number; 685 689 u8 type; 686 690 u8 resource_index; 687 - u32 allocated_requests; 688 - u32 queued_requests; 689 691 u32 frame_number; 690 692 u32 interval; 691 693 ··· 826 832 * @list: a list_head used for request queueing 827 833 * @dep: struct dwc3_ep owning this request 828 834 * @sg: pointer to first incomplete sg 835 + * @start_sg: pointer to the sg which should be queued next 829 836 * @num_pending_sgs: counter to pending sgs 837 + * @num_queued_sgs: counter to the number of sgs which already got queued 830 838 * @remaining: amount of data remaining 831 839 * @epnum: endpoint number to which this request refers 832 840 * @trb: pointer to struct dwc3_trb ··· 844 848 struct list_head list; 845 849 struct dwc3_ep *dep; 846 850 struct scatterlist *sg; 851 + struct scatterlist *start_sg; 847 852 848 853 unsigned num_pending_sgs; 854 + unsigned int num_queued_sgs; 849 855 unsigned remaining; 850 856 u8 epnum; 851 857 struct dwc3_trb *trb; ··· 889 891 * @eps: endpoint array 890 892 * @gadget: 
device side representation of the peripheral controller 891 893 * @gadget_driver: pointer to the gadget driver 894 + * @clks: array of clocks 895 + * @num_clks: number of clocks 896 + * @reset: reset control 892 897 * @regs: base address for our registers 893 898 * @regs_size: address space size 894 899 * @fladj: frame length adjustment ··· 1013 1012 1014 1013 struct usb_gadget gadget; 1015 1014 struct usb_gadget_driver *gadget_driver; 1015 + 1016 + struct clk_bulk_data *clks; 1017 + int num_clks; 1018 + 1019 + struct reset_control *reset; 1016 1020 1017 1021 struct usb_phy *usb2_phy; 1018 1022 struct usb_phy *usb3_phy; ··· 1203 1197 /* Within XferNotReady */ 1204 1198 #define DEPEVT_STATUS_TRANSFER_ACTIVE BIT(3) 1205 1199 1206 - /* Within XferComplete */ 1200 + /* Within XferComplete or XferInProgress */ 1207 1201 #define DEPEVT_STATUS_BUSERR BIT(0) 1208 1202 #define DEPEVT_STATUS_SHORT BIT(1) 1209 1203 #define DEPEVT_STATUS_IOC BIT(2) 1210 - #define DEPEVT_STATUS_LST BIT(3) 1204 + #define DEPEVT_STATUS_LST BIT(3) /* XferComplete */ 1205 + #define DEPEVT_STATUS_MISSED_ISOC BIT(3) /* XferInProgress */ 1211 1206 1212 1207 /* Stream event only */ 1213 1208 #define DEPEVT_STREAMEVT_FOUND 1
+21 -5
drivers/usb/dwc3/debug.h
··· 475 475 if (ret < 0) 476 476 return "UNKNOWN"; 477 477 478 + status = event->status; 479 + 478 480 switch (event->endpoint_event) { 479 481 case DWC3_DEPEVT_XFERCOMPLETE: 480 - strcat(str, "Transfer Complete"); 482 + len = strlen(str); 483 + sprintf(str + len, "Transfer Complete (%c%c%c)", 484 + status & DEPEVT_STATUS_SHORT ? 'S' : 's', 485 + status & DEPEVT_STATUS_IOC ? 'I' : 'i', 486 + status & DEPEVT_STATUS_LST ? 'L' : 'l'); 487 + 481 488 len = strlen(str); 482 489 483 490 if (epnum <= 1) 484 491 sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state)); 485 492 break; 486 493 case DWC3_DEPEVT_XFERINPROGRESS: 487 - strcat(str, "Transfer In-Progress"); 494 + len = strlen(str); 495 + 496 + sprintf(str + len, "Transfer In Progress [%d] (%c%c%c)", 497 + event->parameters, 498 + status & DEPEVT_STATUS_SHORT ? 'S' : 's', 499 + status & DEPEVT_STATUS_IOC ? 'I' : 'i', 500 + status & DEPEVT_STATUS_LST ? 'M' : 'm'); 488 501 break; 489 502 case DWC3_DEPEVT_XFERNOTREADY: 490 - strcat(str, "Transfer Not Ready"); 491 - status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; 492 - strcat(str, status ? " (Active)" : " (Not Active)"); 503 + len = strlen(str); 504 + 505 + sprintf(str + len, "Transfer Not Ready [%d]%s", 506 + event->parameters, 507 + status & DEPEVT_STATUS_TRANSFER_ACTIVE ? 508 + " (Active)" : " (Not Active)"); 493 509 494 510 /* Control Endpoints */ 495 511 if (epnum <= 1) {
+28 -6
drivers/usb/dwc3/drd.c
··· 8 8 */ 9 9 10 10 #include <linux/extcon.h> 11 + #include <linux/of_graph.h> 11 12 #include <linux/platform_device.h> 12 13 13 14 #include "debug.h" ··· 440 439 return NOTIFY_DONE; 441 440 } 442 441 442 + static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) 443 + { 444 + struct device *dev = dwc->dev; 445 + struct device_node *np_phy, *np_conn; 446 + struct extcon_dev *edev; 447 + 448 + if (of_property_read_bool(dev->of_node, "extcon")) 449 + return extcon_get_edev_by_phandle(dwc->dev, 0); 450 + 451 + np_phy = of_parse_phandle(dev->of_node, "phys", 0); 452 + np_conn = of_graph_get_remote_node(np_phy, -1, -1); 453 + 454 + if (np_conn) 455 + edev = extcon_find_edev_by_node(np_conn); 456 + else 457 + edev = NULL; 458 + 459 + of_node_put(np_conn); 460 + of_node_put(np_phy); 461 + 462 + return edev; 463 + } 464 + 443 465 int dwc3_drd_init(struct dwc3 *dwc) 444 466 { 445 467 int ret, irq; 446 468 447 - if (dwc->dev->of_node && 448 - of_property_read_bool(dwc->dev->of_node, "extcon")) { 449 - dwc->edev = extcon_get_edev_by_phandle(dwc->dev, 0); 469 + dwc->edev = dwc3_get_extcon(dwc); 470 + if (IS_ERR(dwc->edev)) 471 + return PTR_ERR(dwc->edev); 450 472 451 - if (IS_ERR(dwc->edev)) 452 - return PTR_ERR(dwc->edev); 453 - 473 + if (dwc->edev) { 454 474 dwc->edev_nb.notifier_call = dwc3_drd_notifier; 455 475 ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST, 456 476 &dwc->edev_nb);
+1 -1
drivers/usb/dwc3/dwc3-of-simple.c
··· 208 208 }; 209 209 210 210 static const struct of_device_id of_dwc3_simple_match[] = { 211 - { .compatible = "qcom,dwc3" }, 212 211 { .compatible = "rockchip,rk3399-dwc3" }, 213 212 { .compatible = "xlnx,zynqmp-dwc3" }, 214 213 { .compatible = "cavium,octeon-7130-usb-uctl" }, 215 214 { .compatible = "sprd,sc9860-dwc3" }, 216 215 { .compatible = "amlogic,meson-axg-dwc3" }, 217 216 { .compatible = "amlogic,meson-gxl-dwc3" }, 217 + { .compatible = "allwinner,sun50i-h6-dwc3" }, 218 218 { /* Sentinel */ } 219 219 }; 220 220 MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
+620
drivers/usb/dwc3/dwc3-qcom.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * 4 + * Inspired by dwc3-of-simple.c 5 + */ 6 + #define DEBUG 7 + 8 + #include <linux/io.h> 9 + #include <linux/of.h> 10 + #include <linux/clk.h> 11 + #include <linux/irq.h> 12 + #include <linux/clk-provider.h> 13 + #include <linux/module.h> 14 + #include <linux/kernel.h> 15 + #include <linux/extcon.h> 16 + #include <linux/of_platform.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/phy/phy.h> 19 + #include <linux/usb/of.h> 20 + #include <linux/reset.h> 21 + #include <linux/iopoll.h> 22 + 23 + #include "core.h" 24 + 25 + /* USB QSCRATCH Hardware registers */ 26 + #define QSCRATCH_HS_PHY_CTRL 0x10 27 + #define UTMI_OTG_VBUS_VALID BIT(20) 28 + #define SW_SESSVLD_SEL BIT(28) 29 + 30 + #define QSCRATCH_SS_PHY_CTRL 0x30 31 + #define LANE0_PWR_PRESENT BIT(24) 32 + 33 + #define QSCRATCH_GENERAL_CFG 0x08 34 + #define PIPE_UTMI_CLK_SEL BIT(0) 35 + #define PIPE3_PHYSTATUS_SW BIT(3) 36 + #define PIPE_UTMI_CLK_DIS BIT(8) 37 + 38 + #define PWR_EVNT_IRQ_STAT_REG 0x58 39 + #define PWR_EVNT_LPM_IN_L2_MASK BIT(4) 40 + #define PWR_EVNT_LPM_OUT_L2_MASK BIT(5) 41 + 42 + struct dwc3_qcom { 43 + struct device *dev; 44 + void __iomem *qscratch_base; 45 + struct platform_device *dwc3; 46 + struct clk **clks; 47 + int num_clocks; 48 + struct reset_control *resets; 49 + 50 + int hs_phy_irq; 51 + int dp_hs_phy_irq; 52 + int dm_hs_phy_irq; 53 + int ss_phy_irq; 54 + 55 + struct extcon_dev *edev; 56 + struct extcon_dev *host_edev; 57 + struct notifier_block vbus_nb; 58 + struct notifier_block host_nb; 59 + 60 + enum usb_dr_mode mode; 61 + bool is_suspended; 62 + bool pm_suspended; 63 + }; 64 + 65 + static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val) 66 + { 67 + u32 reg; 68 + 69 + reg = readl(base + offset); 70 + reg |= val; 71 + writel(reg, base + offset); 72 + 73 + /* ensure that above write is through */ 74 + readl(base + offset); 75 + 
} 76 + 77 + static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val) 78 + { 79 + u32 reg; 80 + 81 + reg = readl(base + offset); 82 + reg &= ~val; 83 + writel(reg, base + offset); 84 + 85 + /* ensure that above write is through */ 86 + readl(base + offset); 87 + } 88 + 89 + static void dwc3_qcom_vbus_overrride_enable(struct dwc3_qcom *qcom, bool enable) 90 + { 91 + if (enable) { 92 + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL, 93 + LANE0_PWR_PRESENT); 94 + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL, 95 + UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL); 96 + } else { 97 + dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL, 98 + LANE0_PWR_PRESENT); 99 + dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL, 100 + UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL); 101 + } 102 + } 103 + 104 + static int dwc3_qcom_vbus_notifier(struct notifier_block *nb, 105 + unsigned long event, void *ptr) 106 + { 107 + struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb); 108 + 109 + /* enable vbus override for device mode */ 110 + dwc3_qcom_vbus_overrride_enable(qcom, event); 111 + qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST; 112 + 113 + return NOTIFY_DONE; 114 + } 115 + 116 + static int dwc3_qcom_host_notifier(struct notifier_block *nb, 117 + unsigned long event, void *ptr) 118 + { 119 + struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb); 120 + 121 + /* disable vbus override in host mode */ 122 + dwc3_qcom_vbus_overrride_enable(qcom, !event); 123 + qcom->mode = event ? 
USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL; 124 + 125 + return NOTIFY_DONE; 126 + } 127 + 128 + static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom) 129 + { 130 + struct device *dev = qcom->dev; 131 + struct extcon_dev *host_edev; 132 + int ret; 133 + 134 + if (!of_property_read_bool(dev->of_node, "extcon")) 135 + return 0; 136 + 137 + qcom->edev = extcon_get_edev_by_phandle(dev, 0); 138 + if (IS_ERR(qcom->edev)) 139 + return PTR_ERR(qcom->edev); 140 + 141 + qcom->vbus_nb.notifier_call = dwc3_qcom_vbus_notifier; 142 + 143 + qcom->host_edev = extcon_get_edev_by_phandle(dev, 1); 144 + if (IS_ERR(qcom->host_edev)) 145 + qcom->host_edev = NULL; 146 + 147 + ret = devm_extcon_register_notifier(dev, qcom->edev, EXTCON_USB, 148 + &qcom->vbus_nb); 149 + if (ret < 0) { 150 + dev_err(dev, "VBUS notifier register failed\n"); 151 + return ret; 152 + } 153 + 154 + if (qcom->host_edev) 155 + host_edev = qcom->host_edev; 156 + else 157 + host_edev = qcom->edev; 158 + 159 + qcom->host_nb.notifier_call = dwc3_qcom_host_notifier; 160 + ret = devm_extcon_register_notifier(dev, host_edev, EXTCON_USB_HOST, 161 + &qcom->host_nb); 162 + if (ret < 0) { 163 + dev_err(dev, "Host notifier register failed\n"); 164 + return ret; 165 + } 166 + 167 + /* Update initial VBUS override based on extcon state */ 168 + if (extcon_get_state(qcom->edev, EXTCON_USB) || 169 + !extcon_get_state(host_edev, EXTCON_USB_HOST)) 170 + dwc3_qcom_vbus_notifier(&qcom->vbus_nb, true, qcom->edev); 171 + else 172 + dwc3_qcom_vbus_notifier(&qcom->vbus_nb, false, qcom->edev); 173 + 174 + return 0; 175 + } 176 + 177 + static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom) 178 + { 179 + if (qcom->hs_phy_irq) { 180 + disable_irq_wake(qcom->hs_phy_irq); 181 + disable_irq_nosync(qcom->hs_phy_irq); 182 + } 183 + 184 + if (qcom->dp_hs_phy_irq) { 185 + disable_irq_wake(qcom->dp_hs_phy_irq); 186 + disable_irq_nosync(qcom->dp_hs_phy_irq); 187 + } 188 + 189 + if (qcom->dm_hs_phy_irq) { 190 + 
disable_irq_wake(qcom->dm_hs_phy_irq); 191 + disable_irq_nosync(qcom->dm_hs_phy_irq); 192 + } 193 + 194 + if (qcom->ss_phy_irq) { 195 + disable_irq_wake(qcom->ss_phy_irq); 196 + disable_irq_nosync(qcom->ss_phy_irq); 197 + } 198 + } 199 + 200 + static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom) 201 + { 202 + if (qcom->hs_phy_irq) { 203 + enable_irq(qcom->hs_phy_irq); 204 + enable_irq_wake(qcom->hs_phy_irq); 205 + } 206 + 207 + if (qcom->dp_hs_phy_irq) { 208 + enable_irq(qcom->dp_hs_phy_irq); 209 + enable_irq_wake(qcom->dp_hs_phy_irq); 210 + } 211 + 212 + if (qcom->dm_hs_phy_irq) { 213 + enable_irq(qcom->dm_hs_phy_irq); 214 + enable_irq_wake(qcom->dm_hs_phy_irq); 215 + } 216 + 217 + if (qcom->ss_phy_irq) { 218 + enable_irq(qcom->ss_phy_irq); 219 + enable_irq_wake(qcom->ss_phy_irq); 220 + } 221 + } 222 + 223 + static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) 224 + { 225 + u32 val; 226 + int i; 227 + 228 + if (qcom->is_suspended) 229 + return 0; 230 + 231 + val = readl(qcom->qscratch_base + PWR_EVNT_IRQ_STAT_REG); 232 + if (!(val & PWR_EVNT_LPM_IN_L2_MASK)) 233 + dev_err(qcom->dev, "HS-PHY not in L2\n"); 234 + 235 + for (i = qcom->num_clocks - 1; i >= 0; i--) 236 + clk_disable_unprepare(qcom->clks[i]); 237 + 238 + qcom->is_suspended = true; 239 + dwc3_qcom_enable_interrupts(qcom); 240 + 241 + return 0; 242 + } 243 + 244 + static int dwc3_qcom_resume(struct dwc3_qcom *qcom) 245 + { 246 + int ret; 247 + int i; 248 + 249 + if (!qcom->is_suspended) 250 + return 0; 251 + 252 + dwc3_qcom_disable_interrupts(qcom); 253 + 254 + for (i = 0; i < qcom->num_clocks; i++) { 255 + ret = clk_prepare_enable(qcom->clks[i]); 256 + if (ret < 0) { 257 + while (--i >= 0) 258 + clk_disable_unprepare(qcom->clks[i]); 259 + return ret; 260 + } 261 + } 262 + 263 + /* Clear existing events from PHY related to L2 in/out */ 264 + dwc3_qcom_setbits(qcom->qscratch_base, PWR_EVNT_IRQ_STAT_REG, 265 + PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK); 266 + 267 + qcom->is_suspended = 
false; 268 + 269 + return 0; 270 + } 271 + 272 + static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data) 273 + { 274 + struct dwc3_qcom *qcom = data; 275 + struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); 276 + 277 + /* If pm_suspended then let pm_resume take care of resuming h/w */ 278 + if (qcom->pm_suspended) 279 + return IRQ_HANDLED; 280 + 281 + if (dwc->xhci) 282 + pm_runtime_resume(&dwc->xhci->dev); 283 + 284 + return IRQ_HANDLED; 285 + } 286 + 287 + static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom) 288 + { 289 + /* Configure dwc3 to use UTMI clock as PIPE clock not present */ 290 + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG, 291 + PIPE_UTMI_CLK_DIS); 292 + 293 + usleep_range(100, 1000); 294 + 295 + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG, 296 + PIPE_UTMI_CLK_SEL | PIPE3_PHYSTATUS_SW); 297 + 298 + usleep_range(100, 1000); 299 + 300 + dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG, 301 + PIPE_UTMI_CLK_DIS); 302 + } 303 + 304 + static int dwc3_qcom_setup_irq(struct platform_device *pdev) 305 + { 306 + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); 307 + int irq, ret; 308 + 309 + irq = platform_get_irq_byname(pdev, "hs_phy_irq"); 310 + if (irq > 0) { 311 + /* Keep wakeup interrupts disabled until suspend */ 312 + irq_set_status_flags(irq, IRQ_NOAUTOEN); 313 + ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 314 + qcom_dwc3_resume_irq, 315 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 316 + "qcom_dwc3 HS", qcom); 317 + if (ret) { 318 + dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret); 319 + return ret; 320 + } 321 + qcom->hs_phy_irq = irq; 322 + } 323 + 324 + irq = platform_get_irq_byname(pdev, "dp_hs_phy_irq"); 325 + if (irq > 0) { 326 + irq_set_status_flags(irq, IRQ_NOAUTOEN); 327 + ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 328 + qcom_dwc3_resume_irq, 329 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 330 + "qcom_dwc3 DP_HS", qcom); 331 + if (ret) { 332 + dev_err(qcom->dev, 
"dp_hs_phy_irq failed: %d\n", ret); 333 + return ret; 334 + } 335 + qcom->dp_hs_phy_irq = irq; 336 + } 337 + 338 + irq = platform_get_irq_byname(pdev, "dm_hs_phy_irq"); 339 + if (irq > 0) { 340 + irq_set_status_flags(irq, IRQ_NOAUTOEN); 341 + ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 342 + qcom_dwc3_resume_irq, 343 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 344 + "qcom_dwc3 DM_HS", qcom); 345 + if (ret) { 346 + dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret); 347 + return ret; 348 + } 349 + qcom->dm_hs_phy_irq = irq; 350 + } 351 + 352 + irq = platform_get_irq_byname(pdev, "ss_phy_irq"); 353 + if (irq > 0) { 354 + irq_set_status_flags(irq, IRQ_NOAUTOEN); 355 + ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 356 + qcom_dwc3_resume_irq, 357 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 358 + "qcom_dwc3 SS", qcom); 359 + if (ret) { 360 + dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret); 361 + return ret; 362 + } 363 + qcom->ss_phy_irq = irq; 364 + } 365 + 366 + return 0; 367 + } 368 + 369 + static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count) 370 + { 371 + struct device *dev = qcom->dev; 372 + struct device_node *np = dev->of_node; 373 + int i; 374 + 375 + qcom->num_clocks = count; 376 + 377 + if (!count) 378 + return 0; 379 + 380 + qcom->clks = devm_kcalloc(dev, qcom->num_clocks, 381 + sizeof(struct clk *), GFP_KERNEL); 382 + if (!qcom->clks) 383 + return -ENOMEM; 384 + 385 + for (i = 0; i < qcom->num_clocks; i++) { 386 + struct clk *clk; 387 + int ret; 388 + 389 + clk = of_clk_get(np, i); 390 + if (IS_ERR(clk)) { 391 + while (--i >= 0) 392 + clk_put(qcom->clks[i]); 393 + return PTR_ERR(clk); 394 + } 395 + 396 + ret = clk_prepare_enable(clk); 397 + if (ret < 0) { 398 + while (--i >= 0) { 399 + clk_disable_unprepare(qcom->clks[i]); 400 + clk_put(qcom->clks[i]); 401 + } 402 + clk_put(clk); 403 + 404 + return ret; 405 + } 406 + 407 + qcom->clks[i] = clk; 408 + } 409 + 410 + return 0; 411 + } 412 + 413 + static int dwc3_qcom_probe(struct 
platform_device *pdev) 414 + { 415 + struct device_node *np = pdev->dev.of_node, *dwc3_np; 416 + struct device *dev = &pdev->dev; 417 + struct dwc3_qcom *qcom; 418 + struct resource *res; 419 + int ret, i; 420 + bool ignore_pipe_clk; 421 + 422 + qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL); 423 + if (!qcom) 424 + return -ENOMEM; 425 + 426 + platform_set_drvdata(pdev, qcom); 427 + qcom->dev = &pdev->dev; 428 + 429 + qcom->resets = devm_reset_control_array_get_optional_exclusive(dev); 430 + if (IS_ERR(qcom->resets)) { 431 + ret = PTR_ERR(qcom->resets); 432 + dev_err(&pdev->dev, "failed to get resets, err=%d\n", ret); 433 + return ret; 434 + } 435 + 436 + ret = reset_control_assert(qcom->resets); 437 + if (ret) { 438 + dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret); 439 + return ret; 440 + } 441 + 442 + usleep_range(10, 1000); 443 + 444 + ret = reset_control_deassert(qcom->resets); 445 + if (ret) { 446 + dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret); 447 + goto reset_assert; 448 + } 449 + 450 + ret = dwc3_qcom_clk_init(qcom, of_count_phandle_with_args(np, 451 + "clocks", "#clock-cells")); 452 + if (ret) { 453 + dev_err(dev, "failed to get clocks\n"); 454 + goto reset_assert; 455 + } 456 + 457 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 458 + qcom->qscratch_base = devm_ioremap_resource(dev, res); 459 + if (IS_ERR(qcom->qscratch_base)) { 460 + dev_err(dev, "failed to map qscratch, err=%d\n", ret); 461 + ret = PTR_ERR(qcom->qscratch_base); 462 + goto clk_disable; 463 + } 464 + 465 + ret = dwc3_qcom_setup_irq(pdev); 466 + if (ret) 467 + goto clk_disable; 468 + 469 + dwc3_np = of_get_child_by_name(np, "dwc3"); 470 + if (!dwc3_np) { 471 + dev_err(dev, "failed to find dwc3 core child\n"); 472 + ret = -ENODEV; 473 + goto clk_disable; 474 + } 475 + 476 + /* 477 + * Disable pipe_clk requirement if specified. Used when dwc3 478 + * operates without SSPHY and only HS/FS/LS modes are supported. 
479 + */ 480 + ignore_pipe_clk = device_property_read_bool(dev, 481 + "qcom,select-utmi-as-pipe-clk"); 482 + if (ignore_pipe_clk) 483 + dwc3_qcom_select_utmi_clk(qcom); 484 + 485 + ret = of_platform_populate(np, NULL, NULL, dev); 486 + if (ret) { 487 + dev_err(dev, "failed to register dwc3 core - %d\n", ret); 488 + goto clk_disable; 489 + } 490 + 491 + qcom->dwc3 = of_find_device_by_node(dwc3_np); 492 + if (!qcom->dwc3) { 493 + dev_err(&pdev->dev, "failed to get dwc3 platform device\n"); 494 + goto depopulate; 495 + } 496 + 497 + qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev); 498 + 499 + /* enable vbus override for device mode */ 500 + if (qcom->mode == USB_DR_MODE_PERIPHERAL) 501 + dwc3_qcom_vbus_overrride_enable(qcom, true); 502 + 503 + /* register extcon to override sw_vbus on Vbus change later */ 504 + ret = dwc3_qcom_register_extcon(qcom); 505 + if (ret) 506 + goto depopulate; 507 + 508 + device_init_wakeup(&pdev->dev, 1); 509 + qcom->is_suspended = false; 510 + pm_runtime_set_active(dev); 511 + pm_runtime_enable(dev); 512 + pm_runtime_forbid(dev); 513 + 514 + return 0; 515 + 516 + depopulate: 517 + of_platform_depopulate(&pdev->dev); 518 + clk_disable: 519 + for (i = qcom->num_clocks - 1; i >= 0; i--) { 520 + clk_disable_unprepare(qcom->clks[i]); 521 + clk_put(qcom->clks[i]); 522 + } 523 + reset_assert: 524 + reset_control_assert(qcom->resets); 525 + 526 + return ret; 527 + } 528 + 529 + static int dwc3_qcom_remove(struct platform_device *pdev) 530 + { 531 + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); 532 + struct device *dev = &pdev->dev; 533 + int i; 534 + 535 + of_platform_depopulate(dev); 536 + 537 + for (i = qcom->num_clocks - 1; i >= 0; i--) { 538 + clk_disable_unprepare(qcom->clks[i]); 539 + clk_put(qcom->clks[i]); 540 + } 541 + qcom->num_clocks = 0; 542 + 543 + reset_control_assert(qcom->resets); 544 + 545 + pm_runtime_allow(dev); 546 + pm_runtime_disable(dev); 547 + 548 + return 0; 549 + } 550 + 551 + #ifdef CONFIG_PM_SLEEP 552 + static int 
dwc3_qcom_pm_suspend(struct device *dev) 553 + { 554 + struct dwc3_qcom *qcom = dev_get_drvdata(dev); 555 + int ret = 0; 556 + 557 + ret = dwc3_qcom_suspend(qcom); 558 + if (!ret) 559 + qcom->pm_suspended = true; 560 + 561 + return ret; 562 + } 563 + 564 + static int dwc3_qcom_pm_resume(struct device *dev) 565 + { 566 + struct dwc3_qcom *qcom = dev_get_drvdata(dev); 567 + int ret; 568 + 569 + ret = dwc3_qcom_resume(qcom); 570 + if (!ret) 571 + qcom->pm_suspended = false; 572 + 573 + return ret; 574 + } 575 + #endif 576 + 577 + #ifdef CONFIG_PM 578 + static int dwc3_qcom_runtime_suspend(struct device *dev) 579 + { 580 + struct dwc3_qcom *qcom = dev_get_drvdata(dev); 581 + 582 + return dwc3_qcom_suspend(qcom); 583 + } 584 + 585 + static int dwc3_qcom_runtime_resume(struct device *dev) 586 + { 587 + struct dwc3_qcom *qcom = dev_get_drvdata(dev); 588 + 589 + return dwc3_qcom_resume(qcom); 590 + } 591 + #endif 592 + 593 + static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = { 594 + SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume) 595 + SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume, 596 + NULL) 597 + }; 598 + 599 + static const struct of_device_id dwc3_qcom_of_match[] = { 600 + { .compatible = "qcom,dwc3" }, 601 + { .compatible = "qcom,msm8996-dwc3" }, 602 + { .compatible = "qcom,sdm845-dwc3" }, 603 + { } 604 + }; 605 + MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match); 606 + 607 + static struct platform_driver dwc3_qcom_driver = { 608 + .probe = dwc3_qcom_probe, 609 + .remove = dwc3_qcom_remove, 610 + .driver = { 611 + .name = "dwc3-qcom", 612 + .pm = &dwc3_qcom_dev_pm_ops, 613 + .of_match_table = dwc3_qcom_of_match, 614 + }, 615 + }; 616 + 617 + module_platform_driver(dwc3_qcom_driver); 618 + 619 + MODULE_LICENSE("GPL v2"); 620 + MODULE_DESCRIPTION("DesignWare DWC3 QCOM Glue Driver");
+2 -4
drivers/usb/dwc3/ep0.c
··· 66 66 struct dwc3 *dwc; 67 67 int ret; 68 68 69 - if (dep->flags & DWC3_EP_BUSY) 69 + if (dep->flags & DWC3_EP_TRANSFER_STARTED) 70 70 return 0; 71 71 72 72 dwc = dep->dwc; ··· 79 79 if (ret < 0) 80 80 return ret; 81 81 82 - dep->flags |= DWC3_EP_BUSY; 83 - dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); 84 82 dwc->ep0_next_event = DWC3_EP0_COMPLETE; 85 83 86 84 return 0; ··· 911 913 { 912 914 struct dwc3_ep *dep = dwc->eps[event->endpoint_number]; 913 915 914 - dep->flags &= ~DWC3_EP_BUSY; 916 + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; 915 917 dep->resource_index = 0; 916 918 dwc->setup_packet_pending = false; 917 919
+383 -434
drivers/usb/dwc3/gadget.c
··· 27 27 #include "gadget.h" 28 28 #include "io.h" 29 29 30 + #define DWC3_ALIGN_FRAME(d) (((d)->frame_number + (d)->interval) \ 31 + & ~((d)->interval - 1)) 32 + 30 33 /** 31 34 * dwc3_gadget_set_test_mode - enables usb2 test modes 32 35 * @dwc: pointer to our context structure ··· 378 375 switch (DWC3_DEPCMD_CMD(cmd)) { 379 376 case DWC3_DEPCMD_STARTTRANSFER: 380 377 dep->flags |= DWC3_EP_TRANSFER_STARTED; 378 + dwc3_gadget_ep_get_transfer_index(dep); 381 379 break; 382 380 case DWC3_DEPCMD_ENDTRANSFER: 383 381 dep->flags &= ~DWC3_EP_TRANSFER_STARTED; ··· 459 455 dep->trb_pool_dma = 0; 460 456 } 461 457 462 - static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); 458 + static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) 459 + { 460 + struct dwc3_gadget_ep_cmd_params params; 461 + 462 + memset(&params, 0x00, sizeof(params)); 463 + 464 + params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 465 + 466 + return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, 467 + &params); 468 + } 463 469 464 470 /** 465 471 * dwc3_gadget_start_config - configure ep resources ··· 505 491 * triggered only when called for EP0-out, which always happens first, and which 506 492 * should only happen in one of the above conditions. 
507 493 */ 508 - static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 494 + static int dwc3_gadget_start_config(struct dwc3_ep *dep) 509 495 { 510 496 struct dwc3_gadget_ep_cmd_params params; 497 + struct dwc3 *dwc; 511 498 u32 cmd; 512 499 int i; 513 500 int ret; ··· 518 503 519 504 memset(&params, 0x00, sizeof(params)); 520 505 cmd = DWC3_DEPCMD_DEPSTARTCFG; 506 + dwc = dep->dwc; 521 507 522 508 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); 523 509 if (ret) ··· 530 514 if (!dep) 531 515 continue; 532 516 533 - ret = dwc3_gadget_set_xfer_resource(dwc, dep); 517 + ret = dwc3_gadget_set_xfer_resource(dep); 534 518 if (ret) 535 519 return ret; 536 520 } ··· 538 522 return 0; 539 523 } 540 524 541 - static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 542 - bool modify, bool restore) 525 + static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) 543 526 { 544 527 const struct usb_ss_ep_comp_descriptor *comp_desc; 545 528 const struct usb_endpoint_descriptor *desc; 546 529 struct dwc3_gadget_ep_cmd_params params; 547 - 548 - if (dev_WARN_ONCE(dwc->dev, modify && restore, 549 - "Can't modify and restore\n")) 550 - return -EINVAL; 530 + struct dwc3 *dwc = dep->dwc; 551 531 552 532 comp_desc = dep->endpoint.comp_desc; 553 533 desc = dep->endpoint.desc; ··· 559 547 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); 560 548 } 561 549 562 - if (modify) { 563 - params.param0 |= DWC3_DEPCFG_ACTION_MODIFY; 564 - } else if (restore) { 565 - params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 550 + params.param0 |= action; 551 + if (action == DWC3_DEPCFG_ACTION_RESTORE) 566 552 params.param2 |= dep->saved_state; 567 - } else { 568 - params.param0 |= DWC3_DEPCFG_ACTION_INIT; 569 - } 570 553 571 554 if (usb_endpoint_xfer_control(desc)) 572 555 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN; ··· 601 594 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params); 602 595 } 603 596 604 - static int 
dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 605 - { 606 - struct dwc3_gadget_ep_cmd_params params; 607 - 608 - memset(&params, 0x00, sizeof(params)); 609 - 610 - params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 611 - 612 - return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, 613 - &params); 614 - } 615 - 616 597 /** 617 598 * __dwc3_gadget_ep_enable - initializes a hw endpoint 618 599 * @dep: endpoint to be initialized 619 - * @modify: if true, modify existing endpoint configuration 620 - * @restore: if true, restore endpoint configuration from scratch buffer 600 + * @action: one of INIT, MODIFY or RESTORE 621 601 * 622 602 * Caller should take care of locking. Execute all necessary commands to 623 603 * initialize a HW endpoint so it can be used by a gadget driver. 624 604 */ 625 - static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 626 - bool modify, bool restore) 605 + static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) 627 606 { 628 607 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 629 608 struct dwc3 *dwc = dep->dwc; ··· 618 625 int ret; 619 626 620 627 if (!(dep->flags & DWC3_EP_ENABLED)) { 621 - ret = dwc3_gadget_start_config(dwc, dep); 628 + ret = dwc3_gadget_start_config(dep); 622 629 if (ret) 623 630 return ret; 624 631 } 625 632 626 - ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore); 633 + ret = dwc3_gadget_set_ep_config(dep, action); 627 634 if (ret) 628 635 return ret; 629 636 ··· 664 671 * Issue StartTransfer here with no-op TRB so we can always rely on No 665 672 * Response Update Transfer command. 
666 673 */ 667 - if (usb_endpoint_xfer_bulk(desc)) { 674 + if (usb_endpoint_xfer_bulk(desc) || 675 + usb_endpoint_xfer_int(desc)) { 668 676 struct dwc3_gadget_ep_cmd_params params; 669 677 struct dwc3_trb *trb; 670 678 dma_addr_t trb_dma; ··· 683 689 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); 684 690 if (ret < 0) 685 691 return ret; 686 - 687 - dep->flags |= DWC3_EP_BUSY; 688 - 689 - dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); 690 - WARN_ON_ONCE(!dep->resource_index); 691 692 } 692 - 693 693 694 694 out: 695 695 trace_dwc3_gadget_ep_enable(dep); ··· 691 703 return 0; 692 704 } 693 705 694 - static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 706 + static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force); 695 707 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 696 708 { 697 709 struct dwc3_request *req; 698 710 699 - dwc3_stop_active_transfer(dwc, dep->number, true); 711 + dwc3_stop_active_transfer(dep, true); 700 712 701 713 /* - giveback all requests to gadget driver */ 702 714 while (!list_empty(&dep->started_list)) { ··· 794 806 return 0; 795 807 796 808 spin_lock_irqsave(&dwc->lock, flags); 797 - ret = __dwc3_gadget_ep_enable(dep, false, false); 809 + ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 798 810 spin_unlock_irqrestore(&dwc->lock, flags); 799 811 800 812 return ret; ··· 828 840 } 829 841 830 842 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 831 - gfp_t gfp_flags) 843 + gfp_t gfp_flags) 832 844 { 833 845 struct dwc3_request *req; 834 846 struct dwc3_ep *dep = to_dwc3_ep(ep); ··· 837 849 if (!req) 838 850 return NULL; 839 851 852 + req->direction = dep->direction; 840 853 req->epnum = dep->number; 841 854 req->dep = dep; 842 - 843 - dep->allocated_requests++; 844 855 845 856 trace_dwc3_alloc_request(req); 846 857 ··· 850 863 struct usb_request *request) 851 864 { 852 865 struct dwc3_request *req = to_dwc3_request(request); 
853 - struct dwc3_ep *dep = to_dwc3_ep(ep); 854 866 855 - dep->allocated_requests--; 856 867 trace_dwc3_free_request(req); 857 868 kfree(req); 858 869 } 859 870 860 - static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep); 871 + /** 872 + * dwc3_ep_prev_trb - returns the previous TRB in the ring 873 + * @dep: The endpoint with the TRB ring 874 + * @index: The index of the current TRB in the ring 875 + * 876 + * Returns the TRB prior to the one pointed to by the index. If the 877 + * index is 0, we will wrap backwards, skip the link TRB, and return 878 + * the one just before that. 879 + */ 880 + static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) 881 + { 882 + u8 tmp = index; 883 + 884 + if (!tmp) 885 + tmp = DWC3_TRB_NUM - 1; 886 + 887 + return &dep->trb_pool[tmp - 1]; 888 + } 889 + 890 + static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) 891 + { 892 + struct dwc3_trb *tmp; 893 + u8 trbs_left; 894 + 895 + /* 896 + * If enqueue & dequeue are equal than it is either full or empty. 897 + * 898 + * One way to know for sure is if the TRB right before us has HWO bit 899 + * set or not. If it has, then we're definitely full and can't fit any 900 + * more transfers in our ring. 
901 + */ 902 + if (dep->trb_enqueue == dep->trb_dequeue) { 903 + tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 904 + if (tmp->ctrl & DWC3_TRB_CTRL_HWO) 905 + return 0; 906 + 907 + return DWC3_TRB_NUM - 1; 908 + } 909 + 910 + trbs_left = dep->trb_dequeue - dep->trb_enqueue; 911 + trbs_left &= (DWC3_TRB_NUM - 1); 912 + 913 + if (dep->trb_dequeue < dep->trb_enqueue) 914 + trbs_left--; 915 + 916 + return trbs_left; 917 + } 861 918 862 919 static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, 863 920 dma_addr_t dma, unsigned length, unsigned chain, unsigned node, ··· 1016 985 struct dwc3_request *req, unsigned chain, unsigned node) 1017 986 { 1018 987 struct dwc3_trb *trb; 1019 - unsigned length = req->request.length; 988 + unsigned int length; 989 + dma_addr_t dma; 1020 990 unsigned stream_id = req->request.stream_id; 1021 991 unsigned short_not_ok = req->request.short_not_ok; 1022 992 unsigned no_interrupt = req->request.no_interrupt; 1023 - dma_addr_t dma = req->request.dma; 993 + 994 + if (req->request.num_sgs > 0) { 995 + length = sg_dma_len(req->start_sg); 996 + dma = sg_dma_address(req->start_sg); 997 + } else { 998 + length = req->request.length; 999 + dma = req->request.dma; 1000 + } 1024 1001 1025 1002 trb = &dep->trb_pool[dep->trb_enqueue]; 1026 1003 ··· 1036 997 dwc3_gadget_move_started_request(req); 1037 998 req->trb = trb; 1038 999 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 1039 - dep->queued_requests++; 1040 1000 } 1041 1001 1042 1002 __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, 1043 1003 stream_id, short_not_ok, no_interrupt); 1044 1004 } 1045 1005 1046 - /** 1047 - * dwc3_ep_prev_trb - returns the previous TRB in the ring 1048 - * @dep: The endpoint with the TRB ring 1049 - * @index: The index of the current TRB in the ring 1050 - * 1051 - * Returns the TRB prior to the one pointed to by the index. 
If the 1052 - * index is 0, we will wrap backwards, skip the link TRB, and return 1053 - * the one just before that. 1054 - */ 1055 - static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) 1056 - { 1057 - u8 tmp = index; 1058 - 1059 - if (!tmp) 1060 - tmp = DWC3_TRB_NUM - 1; 1061 - 1062 - return &dep->trb_pool[tmp - 1]; 1063 - } 1064 - 1065 - static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) 1066 - { 1067 - struct dwc3_trb *tmp; 1068 - u8 trbs_left; 1069 - 1070 - /* 1071 - * If enqueue & dequeue are equal than it is either full or empty. 1072 - * 1073 - * One way to know for sure is if the TRB right before us has HWO bit 1074 - * set or not. If it has, then we're definitely full and can't fit any 1075 - * more transfers in our ring. 1076 - */ 1077 - if (dep->trb_enqueue == dep->trb_dequeue) { 1078 - tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1079 - if (tmp->ctrl & DWC3_TRB_CTRL_HWO) 1080 - return 0; 1081 - 1082 - return DWC3_TRB_NUM - 1; 1083 - } 1084 - 1085 - trbs_left = dep->trb_dequeue - dep->trb_enqueue; 1086 - trbs_left &= (DWC3_TRB_NUM - 1); 1087 - 1088 - if (dep->trb_dequeue < dep->trb_enqueue) 1089 - trbs_left--; 1090 - 1091 - return trbs_left; 1092 - } 1093 - 1094 1006 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, 1095 1007 struct dwc3_request *req) 1096 1008 { 1097 - struct scatterlist *sg = req->sg; 1009 + struct scatterlist *sg = req->start_sg; 1098 1010 struct scatterlist *s; 1099 1011 int i; 1100 1012 1101 - for_each_sg(sg, s, req->num_pending_sgs, i) { 1013 + unsigned int remaining = req->request.num_mapped_sgs 1014 + - req->num_queued_sgs; 1015 + 1016 + for_each_sg(sg, s, remaining, i) { 1102 1017 unsigned int length = req->request.length; 1103 1018 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1104 1019 unsigned int rem = length % maxp; ··· 1080 1087 } else { 1081 1088 dwc3_prepare_one_trb(dep, req, chain, i); 1082 1089 } 1090 + 1091 + /* 1092 + * There can be a situation where all sgs in sglist are 
not 1093 + * queued because of insufficient trb number. To handle this 1094 + * case, update start_sg to next sg to be queued, so that 1095 + * we have free trbs we can continue queuing from where we 1096 + * previously stopped 1097 + */ 1098 + if (chain) 1099 + req->start_sg = sg_next(s); 1100 + 1101 + req->num_queued_sgs++; 1083 1102 1084 1103 if (!dwc3_calc_trbs_left(dep)) 1085 1104 break; ··· 1183 1178 return; 1184 1179 1185 1180 req->sg = req->request.sg; 1181 + req->start_sg = req->sg; 1182 + req->num_queued_sgs = 0; 1186 1183 req->num_pending_sgs = req->request.num_mapped_sgs; 1187 1184 1188 1185 if (req->num_pending_sgs > 0) ··· 1208 1201 if (!dwc3_calc_trbs_left(dep)) 1209 1202 return 0; 1210 1203 1211 - starting = !(dep->flags & DWC3_EP_BUSY); 1204 + starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED); 1212 1205 1213 1206 dwc3_prepare_trbs(dep); 1214 1207 req = next_request(&dep->started_list); ··· 1240 1233 */ 1241 1234 if (req->trb) 1242 1235 memset(req->trb, 0, sizeof(struct dwc3_trb)); 1243 - dep->queued_requests--; 1244 1236 dwc3_gadget_del_and_unmap_request(dep, req, ret); 1245 1237 return ret; 1246 - } 1247 - 1248 - dep->flags |= DWC3_EP_BUSY; 1249 - 1250 - if (starting) { 1251 - dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); 1252 - WARN_ON_ONCE(!dep->resource_index); 1253 1238 } 1254 1239 1255 1240 return 0; ··· 1255 1256 return DWC3_DSTS_SOFFN(reg); 1256 1257 } 1257 1258 1258 - static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1259 - struct dwc3_ep *dep, u32 cur_uf) 1259 + static void __dwc3_gadget_start_isoc(struct dwc3_ep *dep) 1260 1260 { 1261 1261 if (list_empty(&dep->pending_list)) { 1262 - dev_info(dwc->dev, "%s: ran out of requests\n", 1262 + dev_info(dep->dwc->dev, "%s: ran out of requests\n", 1263 1263 dep->name); 1264 1264 dep->flags |= DWC3_EP_PENDING_REQUEST; 1265 1265 return; 1266 1266 } 1267 1267 1268 - /* 1269 - * Schedule the first trb for one interval in the future or at 1270 - * least 4 microframes. 
1271 - */ 1272 - dep->frame_number = cur_uf + max_t(u32, 4, dep->interval); 1268 + dep->frame_number = DWC3_ALIGN_FRAME(dep); 1273 1269 __dwc3_gadget_kick_transfer(dep); 1274 - } 1275 - 1276 - static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1277 - struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1278 - { 1279 - u32 cur_uf, mask; 1280 - 1281 - mask = ~(dep->interval - 1); 1282 - cur_uf = event->parameters & mask; 1283 - 1284 - __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1285 1270 } 1286 1271 1287 1272 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) ··· 1286 1303 1287 1304 req->request.actual = 0; 1288 1305 req->request.status = -EINPROGRESS; 1289 - req->direction = dep->direction; 1290 - req->epnum = dep->number; 1291 1306 1292 1307 trace_dwc3_ep_queue(req); 1293 1308 ··· 1300 1319 * errors which will force us issue EndTransfer command. 1301 1320 */ 1302 1321 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1303 - if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { 1304 - if (dep->flags & DWC3_EP_TRANSFER_STARTED) { 1305 - dwc3_stop_active_transfer(dwc, dep->number, true); 1306 - dep->flags = DWC3_EP_ENABLED; 1307 - } else { 1308 - u32 cur_uf; 1309 - 1310 - cur_uf = __dwc3_gadget_get_frame(dwc); 1311 - __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1312 - dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1313 - } 1322 + if (!(dep->flags & DWC3_EP_PENDING_REQUEST) && 1323 + !(dep->flags & DWC3_EP_TRANSFER_STARTED)) 1314 1324 return 0; 1325 + 1326 + if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { 1327 + if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) { 1328 + __dwc3_gadget_start_isoc(dep); 1329 + return 0; 1330 + } 1315 1331 } 1316 - 1317 - if ((dep->flags & DWC3_EP_BUSY) && 1318 - !(dep->flags & DWC3_EP_MISSED_ISOC)) 1319 - goto out; 1320 - 1321 - return 0; 1322 1332 } 1323 1333 1324 - out: 1325 1334 return __dwc3_gadget_kick_transfer(dep); 1326 1335 } 1327 1336 ··· 1361 1390 } 1362 1391 if (r == req) { 1363 1392 /* wait until it is 
processed */ 1364 - dwc3_stop_active_transfer(dwc, dep->number, true); 1393 + dwc3_stop_active_transfer(dep, true); 1365 1394 1366 1395 /* 1367 1396 * If request was already started, this means we had to ··· 1434 1463 1435 1464 out1: 1436 1465 /* giveback the request */ 1437 - dep->queued_requests--; 1466 + 1438 1467 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1439 1468 1440 1469 out0: ··· 1849 1878 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1850 1879 1851 1880 dep = dwc->eps[0]; 1852 - ret = __dwc3_gadget_ep_enable(dep, false, false); 1881 + ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1853 1882 if (ret) { 1854 1883 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1855 1884 goto err0; 1856 1885 } 1857 1886 1858 1887 dep = dwc->eps[1]; 1859 - ret = __dwc3_gadget_ep_enable(dep, false, false); 1888 + ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1860 1889 if (ret) { 1861 1890 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1862 1891 goto err1; ··· 2053 2082 2054 2083 /* -------------------------------------------------------------------------- */ 2055 2084 2056 - static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total) 2085 + static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep) 2086 + { 2087 + struct dwc3 *dwc = dep->dwc; 2088 + 2089 + usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 2090 + dep->endpoint.maxburst = 1; 2091 + dep->endpoint.ops = &dwc3_gadget_ep0_ops; 2092 + if (!dep->direction) 2093 + dwc->gadget.ep0 = &dep->endpoint; 2094 + 2095 + dep->endpoint.caps.type_control = true; 2096 + 2097 + return 0; 2098 + } 2099 + 2100 + static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) 2101 + { 2102 + struct dwc3 *dwc = dep->dwc; 2103 + int mdwidth; 2104 + int kbytes; 2105 + int size; 2106 + 2107 + mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 2108 + /* MDWIDTH is represented in bits, we need it in bytes */ 2109 + mdwidth /= 8; 2110 + 2111 + size = dwc3_readl(dwc->regs, 
DWC3_GTXFIFOSIZ(dep->number >> 1)); 2112 + if (dwc3_is_usb31(dwc)) 2113 + size = DWC31_GTXFIFOSIZ_TXFDEF(size); 2114 + else 2115 + size = DWC3_GTXFIFOSIZ_TXFDEF(size); 2116 + 2117 + /* FIFO Depth is in MDWDITH bytes. Multiply */ 2118 + size *= mdwidth; 2119 + 2120 + kbytes = size / 1024; 2121 + if (kbytes == 0) 2122 + kbytes = 1; 2123 + 2124 + /* 2125 + * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for 2126 + * internal overhead. We don't really know how these are used, 2127 + * but documentation say it exists. 2128 + */ 2129 + size -= mdwidth * (kbytes + 1); 2130 + size /= kbytes; 2131 + 2132 + usb_ep_set_maxpacket_limit(&dep->endpoint, size); 2133 + 2134 + dep->endpoint.max_streams = 15; 2135 + dep->endpoint.ops = &dwc3_gadget_ep_ops; 2136 + list_add_tail(&dep->endpoint.ep_list, 2137 + &dwc->gadget.ep_list); 2138 + dep->endpoint.caps.type_iso = true; 2139 + dep->endpoint.caps.type_bulk = true; 2140 + dep->endpoint.caps.type_int = true; 2141 + 2142 + return dwc3_alloc_trb_pool(dep); 2143 + } 2144 + 2145 + static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep) 2146 + { 2147 + struct dwc3 *dwc = dep->dwc; 2148 + 2149 + usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 2150 + dep->endpoint.max_streams = 15; 2151 + dep->endpoint.ops = &dwc3_gadget_ep_ops; 2152 + list_add_tail(&dep->endpoint.ep_list, 2153 + &dwc->gadget.ep_list); 2154 + dep->endpoint.caps.type_iso = true; 2155 + dep->endpoint.caps.type_bulk = true; 2156 + dep->endpoint.caps.type_int = true; 2157 + 2158 + return dwc3_alloc_trb_pool(dep); 2159 + } 2160 + 2161 + static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum) 2057 2162 { 2058 2163 struct dwc3_ep *dep; 2164 + bool direction = epnum & 1; 2165 + int ret; 2166 + u8 num = epnum >> 1; 2167 + 2168 + dep = kzalloc(sizeof(*dep), GFP_KERNEL); 2169 + if (!dep) 2170 + return -ENOMEM; 2171 + 2172 + dep->dwc = dwc; 2173 + dep->number = epnum; 2174 + dep->direction = direction; 2175 + dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); 
2176 + dwc->eps[epnum] = dep; 2177 + 2178 + snprintf(dep->name, sizeof(dep->name), "ep%u%s", num, 2179 + direction ? "in" : "out"); 2180 + 2181 + dep->endpoint.name = dep->name; 2182 + 2183 + if (!(dep->number > 1)) { 2184 + dep->endpoint.desc = &dwc3_gadget_ep0_desc; 2185 + dep->endpoint.comp_desc = NULL; 2186 + } 2187 + 2188 + spin_lock_init(&dep->lock); 2189 + 2190 + if (num == 0) 2191 + ret = dwc3_gadget_init_control_endpoint(dep); 2192 + else if (direction) 2193 + ret = dwc3_gadget_init_in_endpoint(dep); 2194 + else 2195 + ret = dwc3_gadget_init_out_endpoint(dep); 2196 + 2197 + if (ret) 2198 + return ret; 2199 + 2200 + dep->endpoint.caps.dir_in = direction; 2201 + dep->endpoint.caps.dir_out = !direction; 2202 + 2203 + INIT_LIST_HEAD(&dep->pending_list); 2204 + INIT_LIST_HEAD(&dep->started_list); 2205 + 2206 + return 0; 2207 + } 2208 + 2209 + static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total) 2210 + { 2059 2211 u8 epnum; 2060 2212 2061 2213 INIT_LIST_HEAD(&dwc->gadget.ep_list); 2062 2214 2063 2215 for (epnum = 0; epnum < total; epnum++) { 2064 - bool direction = epnum & 1; 2065 - u8 num = epnum >> 1; 2216 + int ret; 2066 2217 2067 - dep = kzalloc(sizeof(*dep), GFP_KERNEL); 2068 - if (!dep) 2069 - return -ENOMEM; 2070 - 2071 - dep->dwc = dwc; 2072 - dep->number = epnum; 2073 - dep->direction = direction; 2074 - dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); 2075 - dwc->eps[epnum] = dep; 2076 - 2077 - snprintf(dep->name, sizeof(dep->name), "ep%u%s", num, 2078 - direction ? 
"in" : "out"); 2079 - 2080 - dep->endpoint.name = dep->name; 2081 - 2082 - if (!(dep->number > 1)) { 2083 - dep->endpoint.desc = &dwc3_gadget_ep0_desc; 2084 - dep->endpoint.comp_desc = NULL; 2085 - } 2086 - 2087 - spin_lock_init(&dep->lock); 2088 - 2089 - if (num == 0) { 2090 - usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 2091 - dep->endpoint.maxburst = 1; 2092 - dep->endpoint.ops = &dwc3_gadget_ep0_ops; 2093 - if (!direction) 2094 - dwc->gadget.ep0 = &dep->endpoint; 2095 - } else if (direction) { 2096 - int mdwidth; 2097 - int kbytes; 2098 - int size; 2099 - int ret; 2100 - 2101 - mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 2102 - /* MDWIDTH is represented in bits, we need it in bytes */ 2103 - mdwidth /= 8; 2104 - 2105 - size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num)); 2106 - if (dwc3_is_usb31(dwc)) 2107 - size = DWC31_GTXFIFOSIZ_TXFDEF(size); 2108 - else 2109 - size = DWC3_GTXFIFOSIZ_TXFDEF(size); 2110 - 2111 - /* FIFO Depth is in MDWDITH bytes. Multiply */ 2112 - size *= mdwidth; 2113 - 2114 - kbytes = size / 1024; 2115 - if (kbytes == 0) 2116 - kbytes = 1; 2117 - 2118 - /* 2119 - * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for 2120 - * internal overhead. We don't really know how these are used, 2121 - * but documentation say it exists. 
2122 - */ 2123 - size -= mdwidth * (kbytes + 1); 2124 - size /= kbytes; 2125 - 2126 - usb_ep_set_maxpacket_limit(&dep->endpoint, size); 2127 - 2128 - dep->endpoint.max_streams = 15; 2129 - dep->endpoint.ops = &dwc3_gadget_ep_ops; 2130 - list_add_tail(&dep->endpoint.ep_list, 2131 - &dwc->gadget.ep_list); 2132 - 2133 - ret = dwc3_alloc_trb_pool(dep); 2134 - if (ret) 2135 - return ret; 2136 - } else { 2137 - int ret; 2138 - 2139 - usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 2140 - dep->endpoint.max_streams = 15; 2141 - dep->endpoint.ops = &dwc3_gadget_ep_ops; 2142 - list_add_tail(&dep->endpoint.ep_list, 2143 - &dwc->gadget.ep_list); 2144 - 2145 - ret = dwc3_alloc_trb_pool(dep); 2146 - if (ret) 2147 - return ret; 2148 - } 2149 - 2150 - if (num == 0) { 2151 - dep->endpoint.caps.type_control = true; 2152 - } else { 2153 - dep->endpoint.caps.type_iso = true; 2154 - dep->endpoint.caps.type_bulk = true; 2155 - dep->endpoint.caps.type_int = true; 2156 - } 2157 - 2158 - dep->endpoint.caps.dir_in = direction; 2159 - dep->endpoint.caps.dir_out = !direction; 2160 - 2161 - INIT_LIST_HEAD(&dep->pending_list); 2162 - INIT_LIST_HEAD(&dep->started_list); 2218 + ret = dwc3_gadget_init_endpoint(dwc, epnum); 2219 + if (ret) 2220 + return ret; 2163 2221 } 2164 2222 2165 2223 return 0; ··· 2223 2223 2224 2224 /* -------------------------------------------------------------------------- */ 2225 2225 2226 - static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 2226 + static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, 2227 2227 struct dwc3_request *req, struct dwc3_trb *trb, 2228 - const struct dwc3_event_depevt *event, int status, 2229 - int chain) 2228 + const struct dwc3_event_depevt *event, int status, int chain) 2230 2229 { 2231 2230 unsigned int count; 2232 - unsigned int s_pkt = 0; 2233 - unsigned int trb_status; 2234 2231 2235 2232 dwc3_ep_inc_deq(dep); 2236 - 2237 - if (req->trb == trb) 2238 - dep->queued_requests--; 2239 2233 2240 2234 
trace_dwc3_complete_trb(dep, trb); 2241 2235 ··· 2262 2268 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 2263 2269 return 1; 2264 2270 2265 - if (dep->direction) { 2266 - if (count) { 2267 - trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 2268 - if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 2269 - /* 2270 - * If missed isoc occurred and there is 2271 - * no request queued then issue END 2272 - * TRANSFER, so that core generates 2273 - * next xfernotready and we will issue 2274 - * a fresh START TRANSFER. 2275 - * If there are still queued request 2276 - * then wait, do not issue either END 2277 - * or UPDATE TRANSFER, just attach next 2278 - * request in pending_list during 2279 - * giveback.If any future queued request 2280 - * is successfully transferred then we 2281 - * will issue UPDATE TRANSFER for all 2282 - * request in the pending_list. 2283 - */ 2284 - dep->flags |= DWC3_EP_MISSED_ISOC; 2285 - } else { 2286 - dev_err(dwc->dev, "incomplete IN transfer %s\n", 2287 - dep->name); 2288 - status = -ECONNRESET; 2289 - } 2290 - } else { 2291 - dep->flags &= ~DWC3_EP_MISSED_ISOC; 2292 - } 2293 - } else { 2294 - if (count && (event->status & DEPEVT_STATUS_SHORT)) 2295 - s_pkt = 1; 2296 - } 2297 - 2298 - if (s_pkt && !chain) 2271 + if (event->status & DEPEVT_STATUS_SHORT && !chain) 2299 2272 return 1; 2300 2273 2301 - if ((event->status & DEPEVT_STATUS_IOC) && 2302 - (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2274 + if (event->status & DEPEVT_STATUS_IOC) 2303 2275 return 1; 2304 2276 2305 2277 return 0; 2306 2278 } 2307 2279 2308 - static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 2309 - const struct dwc3_event_depevt *event, int status) 2280 + static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep, 2281 + struct dwc3_request *req, const struct dwc3_event_depevt *event, 2282 + int status) 2310 2283 { 2311 - struct dwc3_request *req, *n; 2312 - struct dwc3_trb *trb; 2313 - bool ioc = false; 2314 - int ret = 0; 2284 + struct dwc3_trb *trb 
= &dep->trb_pool[dep->trb_dequeue]; 2285 + struct scatterlist *sg = req->sg; 2286 + struct scatterlist *s; 2287 + unsigned int pending = req->num_pending_sgs; 2288 + unsigned int i; 2289 + int ret = 0; 2315 2290 2316 - list_for_each_entry_safe(req, n, &dep->started_list, list) { 2317 - unsigned length; 2318 - int chain; 2291 + for_each_sg(sg, s, pending, i) { 2292 + trb = &dep->trb_pool[dep->trb_dequeue]; 2319 2293 2320 - length = req->request.length; 2321 - chain = req->num_pending_sgs > 0; 2322 - if (chain) { 2323 - struct scatterlist *sg = req->sg; 2324 - struct scatterlist *s; 2325 - unsigned int pending = req->num_pending_sgs; 2326 - unsigned int i; 2327 - 2328 - for_each_sg(sg, s, pending, i) { 2329 - trb = &dep->trb_pool[dep->trb_dequeue]; 2330 - 2331 - if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2332 - break; 2333 - 2334 - req->sg = sg_next(s); 2335 - req->num_pending_sgs--; 2336 - 2337 - ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2338 - event, status, chain); 2339 - if (ret) 2340 - break; 2341 - } 2342 - } else { 2343 - trb = &dep->trb_pool[dep->trb_dequeue]; 2344 - ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2345 - event, status, chain); 2346 - } 2347 - 2348 - if (req->unaligned || req->zero) { 2349 - trb = &dep->trb_pool[dep->trb_dequeue]; 2350 - ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2351 - event, status, false); 2352 - req->unaligned = false; 2353 - req->zero = false; 2354 - } 2355 - 2356 - req->request.actual = length - req->remaining; 2357 - 2358 - if ((req->request.actual < length) && req->num_pending_sgs) 2359 - return __dwc3_gadget_kick_transfer(dep); 2360 - 2361 - dwc3_gadget_giveback(dep, req, status); 2362 - 2363 - if (ret) { 2364 - if ((event->status & DEPEVT_STATUS_IOC) && 2365 - (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2366 - ioc = true; 2294 + if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2367 2295 break; 2368 - } 2296 + 2297 + req->sg = sg_next(s); 2298 + req->num_pending_sgs--; 2299 + 2300 + ret = dwc3_gadget_ep_reclaim_completed_trb(dep, 
req, 2301 + trb, event, status, true); 2302 + if (ret) 2303 + break; 2369 2304 } 2370 2305 2371 - /* 2372 - * Our endpoint might get disabled by another thread during 2373 - * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 2374 - * early on so DWC3_EP_BUSY flag gets cleared 2375 - */ 2376 - if (!dep->endpoint.desc) 2377 - return 1; 2378 - 2379 - if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 2380 - list_empty(&dep->started_list)) { 2381 - if (list_empty(&dep->pending_list)) { 2382 - /* 2383 - * If there is no entry in request list then do 2384 - * not issue END TRANSFER now. Just set PENDING 2385 - * flag, so that END TRANSFER is issued when an 2386 - * entry is added into request list. 2387 - */ 2388 - dep->flags = DWC3_EP_PENDING_REQUEST; 2389 - } else { 2390 - dwc3_stop_active_transfer(dwc, dep->number, true); 2391 - dep->flags = DWC3_EP_ENABLED; 2392 - } 2393 - return 1; 2394 - } 2395 - 2396 - if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc) 2397 - return 0; 2398 - 2399 - return 1; 2306 + return ret; 2400 2307 } 2401 2308 2402 - static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 2403 - struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 2309 + static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, 2310 + struct dwc3_request *req, const struct dwc3_event_depevt *event, 2311 + int status) 2404 2312 { 2405 - unsigned status = 0; 2406 - int clean_busy; 2407 - u32 is_xfer_complete; 2313 + struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; 2408 2314 2409 - is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 2315 + return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb, 2316 + event, status, false); 2317 + } 2318 + 2319 + static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) 2320 + { 2321 + return req->request.actual == req->request.length; 2322 + } 2323 + 2324 + static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, 2325 + const struct 
dwc3_event_depevt *event, 2326 + struct dwc3_request *req, int status) 2327 + { 2328 + int ret; 2329 + 2330 + if (req->num_pending_sgs) 2331 + ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event, 2332 + status); 2333 + else 2334 + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2335 + status); 2336 + 2337 + if (req->unaligned || req->zero) { 2338 + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2339 + status); 2340 + req->unaligned = false; 2341 + req->zero = false; 2342 + } 2343 + 2344 + req->request.actual = req->request.length - req->remaining; 2345 + 2346 + if (!dwc3_gadget_ep_request_completed(req) && 2347 + req->num_pending_sgs) { 2348 + __dwc3_gadget_kick_transfer(dep); 2349 + goto out; 2350 + } 2351 + 2352 + dwc3_gadget_giveback(dep, req, status); 2353 + 2354 + out: 2355 + return ret; 2356 + } 2357 + 2358 + static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep, 2359 + const struct dwc3_event_depevt *event, int status) 2360 + { 2361 + struct dwc3_request *req; 2362 + struct dwc3_request *tmp; 2363 + 2364 + list_for_each_entry_safe(req, tmp, &dep->started_list, list) { 2365 + int ret; 2366 + 2367 + ret = dwc3_gadget_ep_cleanup_completed_request(dep, event, 2368 + req, status); 2369 + if (ret) 2370 + break; 2371 + } 2372 + } 2373 + 2374 + static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep, 2375 + const struct dwc3_event_depevt *event) 2376 + { 2377 + dep->frame_number = event->parameters; 2378 + } 2379 + 2380 + static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, 2381 + const struct dwc3_event_depevt *event) 2382 + { 2383 + struct dwc3 *dwc = dep->dwc; 2384 + unsigned status = 0; 2385 + bool stop = false; 2386 + 2387 + dwc3_gadget_endpoint_frame_from_event(dep, event); 2410 2388 2411 2389 if (event->status & DEPEVT_STATUS_BUSERR) 2412 2390 status = -ECONNRESET; 2413 2391 2414 - clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 2415 - if (clean_busy && 
(!dep->endpoint.desc || is_xfer_complete || 2416 - usb_endpoint_xfer_isoc(dep->endpoint.desc))) 2417 - dep->flags &= ~DWC3_EP_BUSY; 2392 + if (event->status & DEPEVT_STATUS_MISSED_ISOC) { 2393 + status = -EXDEV; 2394 + 2395 + if (list_empty(&dep->started_list)) 2396 + stop = true; 2397 + } 2398 + 2399 + dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); 2400 + 2401 + if (stop) { 2402 + dwc3_stop_active_transfer(dep, true); 2403 + dep->flags = DWC3_EP_ENABLED; 2404 + } 2418 2405 2419 2406 /* 2420 2407 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. ··· 2421 2446 2422 2447 dwc->u1u2 = 0; 2423 2448 } 2449 + } 2424 2450 2425 - /* 2426 - * Our endpoint might get disabled by another thread during 2427 - * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 2428 - * early on so DWC3_EP_BUSY flag gets cleared 2429 - */ 2430 - if (!dep->endpoint.desc) 2431 - return; 2432 - 2433 - if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) 2434 - __dwc3_gadget_kick_transfer(dep); 2451 + static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, 2452 + const struct dwc3_event_depevt *event) 2453 + { 2454 + dwc3_gadget_endpoint_frame_from_event(dep, event); 2455 + __dwc3_gadget_start_isoc(dep); 2435 2456 } 2436 2457 2437 2458 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, ··· 2454 2483 } 2455 2484 2456 2485 switch (event->endpoint_event) { 2457 - case DWC3_DEPEVT_XFERCOMPLETE: 2458 - dep->resource_index = 0; 2459 - 2460 - if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2461 - dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n"); 2462 - return; 2463 - } 2464 - 2465 - dwc3_endpoint_transfer_complete(dwc, dep, event); 2466 - break; 2467 2486 case DWC3_DEPEVT_XFERINPROGRESS: 2468 - dwc3_endpoint_transfer_complete(dwc, dep, event); 2487 + dwc3_gadget_endpoint_transfer_in_progress(dep, event); 2469 2488 break; 2470 2489 case DWC3_DEPEVT_XFERNOTREADY: 2471 - if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) 2472 - 
dwc3_gadget_start_isoc(dwc, dep, event); 2473 - else 2474 - __dwc3_gadget_kick_transfer(dep); 2475 - 2476 - break; 2477 - case DWC3_DEPEVT_STREAMEVT: 2478 - if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 2479 - dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 2480 - dep->name); 2481 - return; 2482 - } 2490 + dwc3_gadget_endpoint_transfer_not_ready(dep, event); 2483 2491 break; 2484 2492 case DWC3_DEPEVT_EPCMDCMPLT: 2485 2493 cmd = DEPEVT_PARAMETER_CMD(event->parameters); ··· 2468 2518 wake_up(&dep->wait_end_transfer); 2469 2519 } 2470 2520 break; 2521 + case DWC3_DEPEVT_STREAMEVT: 2522 + case DWC3_DEPEVT_XFERCOMPLETE: 2471 2523 case DWC3_DEPEVT_RXTXFIFOEVT: 2472 2524 break; 2473 2525 } ··· 2514 2562 } 2515 2563 } 2516 2564 2517 - static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2565 + static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force) 2518 2566 { 2519 - struct dwc3_ep *dep; 2567 + struct dwc3 *dwc = dep->dwc; 2520 2568 struct dwc3_gadget_ep_cmd_params params; 2521 2569 u32 cmd; 2522 2570 int ret; 2523 - 2524 - dep = dwc->eps[epnum]; 2525 2571 2526 2572 if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || 2527 2573 !dep->resource_index) ··· 2564 2614 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); 2565 2615 WARN_ON_ONCE(ret); 2566 2616 dep->resource_index = 0; 2567 - dep->flags &= ~DWC3_EP_BUSY; 2568 2617 2569 2618 if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) { 2570 2619 dep->flags |= DWC3_EP_END_TRANSFER_PENDING; ··· 2765 2816 } 2766 2817 2767 2818 dep = dwc->eps[0]; 2768 - ret = __dwc3_gadget_ep_enable(dep, true, false); 2819 + ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY); 2769 2820 if (ret) { 2770 2821 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2771 2822 return; 2772 2823 } 2773 2824 2774 2825 dep = dwc->eps[1]; 2775 - ret = __dwc3_gadget_ep_enable(dep, true, false); 2826 + ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY); 2776 2827 if (ret) { 2777 
2828 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2778 2829 return;
+2 -3
drivers/usb/dwc3/gadget.h
··· 98 98 * Caller should take care of locking. Returns the transfer resource 99 99 * index for a given endpoint. 100 100 */ 101 - static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep) 101 + static inline void dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep) 102 102 { 103 103 u32 res_id; 104 104 105 105 res_id = dwc3_readl(dep->regs, DWC3_DEPCMD); 106 - 107 - return DWC3_DEPCMD_GET_RSC_IDX(res_id); 106 + dep->resource_index = DWC3_DEPCMD_GET_RSC_IDX(res_id); 108 107 } 109 108 110 109 #endif /* __DRIVERS_USB_DWC3_GADGET_H */
+4 -8
drivers/usb/dwc3/trace.h
··· 230 230 TP_fast_assign( 231 231 __assign_str(name, dep->name); 232 232 __entry->trb = trb; 233 - __entry->allocated = dep->allocated_requests; 234 - __entry->queued = dep->queued_requests; 235 233 __entry->bpl = trb->bpl; 236 234 __entry->bph = trb->bph; 237 235 __entry->size = trb->size; 238 236 __entry->ctrl = trb->ctrl; 239 237 __entry->type = usb_endpoint_type(dep->endpoint.desc); 240 238 ), 241 - TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)", 242 - __get_str(name), __entry->queued, __entry->allocated, 243 - __entry->trb, __entry->bph, __entry->bpl, 239 + TP_printk("%s: trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)", 240 + __get_str(name), __entry->trb, __entry->bph, __entry->bpl, 244 241 ({char *s; 245 242 int pcm = ((__entry->size >> 24) & 3) + 1; 246 243 switch (__entry->type) { ··· 303 306 __entry->trb_enqueue = dep->trb_enqueue; 304 307 __entry->trb_dequeue = dep->trb_dequeue; 305 308 ), 306 - TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c", 309 + TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c:%c:%c", 307 310 __get_str(name), __entry->maxpacket, 308 311 __entry->maxpacket_limit, __entry->max_streams, 309 312 __entry->maxburst, __entry->trb_enqueue, ··· 311 314 __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e', 312 315 __entry->flags & DWC3_EP_STALL ? 'S' : 's', 313 316 __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w', 314 - __entry->flags & DWC3_EP_BUSY ? 'B' : 'b', 317 + __entry->flags & DWC3_EP_TRANSFER_STARTED ? 'B' : 'b', 315 318 __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p', 316 - __entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm', 317 319 __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e', 318 320 __entry->direction ? '<' : '>' 319 321 )
+1 -1
drivers/usb/gadget/composite.c
··· 1601 1601 cdev->gadget->ep0->maxpacket; 1602 1602 if (gadget_is_superspeed(gadget)) { 1603 1603 if (gadget->speed >= USB_SPEED_SUPER) { 1604 - cdev->desc.bcdUSB = cpu_to_le16(0x0310); 1604 + cdev->desc.bcdUSB = cpu_to_le16(0x0320); 1605 1605 cdev->desc.bMaxPacketSize0 = 9; 1606 1606 } else { 1607 1607 cdev->desc.bcdUSB = cpu_to_le16(0x0210);
+2 -1
drivers/usb/gadget/function/f_ecm.c
··· 705 705 ecm_opts->bound = true; 706 706 } 707 707 708 + ecm_string_defs[1].s = ecm->ethaddr; 709 + 708 710 us = usb_gstrings_attach(cdev, ecm_strings, 709 711 ARRAY_SIZE(ecm_string_defs)); 710 712 if (IS_ERR(us)) ··· 930 928 mutex_unlock(&opts->lock); 931 929 return ERR_PTR(-EINVAL); 932 930 } 933 - ecm_string_defs[1].s = ecm->ethaddr; 934 931 935 932 ecm->port.ioport = netdev_priv(opts->net); 936 933 mutex_unlock(&opts->lock);
+11
drivers/usb/gadget/function/f_fs.c
··· 1266 1266 return ret; 1267 1267 } 1268 1268 1269 + #ifdef CONFIG_COMPAT 1270 + static long ffs_epfile_compat_ioctl(struct file *file, unsigned code, 1271 + unsigned long value) 1272 + { 1273 + return ffs_epfile_ioctl(file, code, value); 1274 + } 1275 + #endif 1276 + 1269 1277 static const struct file_operations ffs_epfile_operations = { 1270 1278 .llseek = no_llseek, 1271 1279 ··· 1282 1274 .read_iter = ffs_epfile_read_iter, 1283 1275 .release = ffs_epfile_release, 1284 1276 .unlocked_ioctl = ffs_epfile_ioctl, 1277 + #ifdef CONFIG_COMPAT 1278 + .compat_ioctl = ffs_epfile_compat_ioctl, 1279 + #endif 1285 1280 }; 1286 1281 1287 1282
+20 -6
drivers/usb/gadget/function/f_midi.c
··· 109 109 110 110 static void f_midi_transmit(struct f_midi *midi); 111 111 static void f_midi_rmidi_free(struct snd_rawmidi *rmidi); 112 + static void f_midi_free_inst(struct usb_function_instance *f); 112 113 113 114 DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); 114 115 DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); ··· 1103 1102 u32 num; \ 1104 1103 \ 1105 1104 mutex_lock(&opts->lock); \ 1106 - if (opts->refcnt) { \ 1105 + if (opts->refcnt > 1) { \ 1107 1106 ret = -EBUSY; \ 1108 1107 goto end; \ 1109 1108 } \ ··· 1158 1157 char *c; 1159 1158 1160 1159 mutex_lock(&opts->lock); 1161 - if (opts->refcnt) { 1160 + if (opts->refcnt > 1) { 1162 1161 ret = -EBUSY; 1163 1162 goto end; 1164 1163 } ··· 1199 1198 static void f_midi_free_inst(struct usb_function_instance *f) 1200 1199 { 1201 1200 struct f_midi_opts *opts; 1201 + bool free = false; 1202 1202 1203 1203 opts = container_of(f, struct f_midi_opts, func_inst); 1204 1204 1205 - if (opts->id_allocated) 1206 - kfree(opts->id); 1205 + mutex_lock(&opts->lock); 1206 + if (!--opts->refcnt) { 1207 + free = true; 1208 + } 1209 + mutex_unlock(&opts->lock); 1207 1210 1208 - kfree(opts); 1211 + if (free) { 1212 + if (opts->id_allocated) 1213 + kfree(opts->id); 1214 + kfree(opts); 1215 + } 1209 1216 } 1210 1217 1211 1218 static struct usb_function_instance *f_midi_alloc_inst(void) ··· 1232 1223 opts->qlen = 32; 1233 1224 opts->in_ports = 1; 1234 1225 opts->out_ports = 1; 1226 + opts->refcnt = 1; 1235 1227 1236 1228 config_group_init_type_name(&opts->func_inst.group, "", 1237 1229 &midi_func_type); ··· 1244 1234 { 1245 1235 struct f_midi *midi; 1246 1236 struct f_midi_opts *opts; 1237 + bool free = false; 1247 1238 1248 1239 midi = func_to_midi(f); 1249 1240 opts = container_of(f->fi, struct f_midi_opts, func_inst); ··· 1253 1242 kfree(midi->id); 1254 1243 kfifo_free(&midi->in_req_fifo); 1255 1244 kfree(midi); 1256 - --opts->refcnt; 1245 + free = true; 1257 1246 } 1258 1247 mutex_unlock(&opts->lock); 1248 + 1249 + if (free) 1250 + 
f_midi_free_inst(&opts->func_inst); 1259 1251 } 1260 1252 1261 1253 static void f_midi_rmidi_free(struct snd_rawmidi *rmidi)
+3
drivers/usb/gadget/function/rndis.c
··· 851 851 */ 852 852 pr_warn("%s: unknown RNDIS message 0x%08X len %d\n", 853 853 __func__, MsgType, MsgLength); 854 + /* Garbled message can be huge, so limit what we display */ 855 + if (MsgLength > 16) 856 + MsgLength = 16; 854 857 print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET, 855 858 buf, MsgLength); 856 859 break;
+4
drivers/usb/gadget/function/u_ether.c
··· 844 844 net->ethtool_ops = &ops; 845 845 SET_NETDEV_DEVTYPE(net, &gadget_type); 846 846 847 + /* MTU range: 14 - 15412 */ 848 + net->min_mtu = ETH_HLEN; 849 + net->max_mtu = GETHER_MAX_ETH_FRAME_LEN; 850 + 847 851 return net; 848 852 } 849 853 EXPORT_SYMBOL_GPL(gether_setup_name_default);
+2
drivers/usb/gadget/udc/Kconfig
··· 438 438 dynamically linked module called "udc-xilinx" and force all 439 439 gadget drivers to also be dynamically linked. 440 440 441 + source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig" 442 + 441 443 # 442 444 # LAST -- dummy/emulated controller 443 445 #
+1
drivers/usb/gadget/udc/Makefile
··· 39 39 obj-$(CONFIG_USB_GR_UDC) += gr_udc.o 40 40 obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o 41 41 obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o 42 + obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/ 42 43 obj-$(CONFIG_USB_BDC_UDC) += bdc/
+7
drivers/usb/gadget/udc/aspeed-vhub/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0+ 2 + config USB_ASPEED_VHUB 3 + tristate "Aspeed vHub UDC driver" 4 + depends on ARCH_ASPEED || COMPILE_TEST 5 + help 6 + USB peripheral controller for the Aspeed AST2500 family 7 + SoCs supporting the "vHub" functionality and USB2.0
+4
drivers/usb/gadget/udc/aspeed-vhub/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0+ 2 + obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub.o 3 + aspeed-vhub-y := core.o ep0.o epn.o dev.o hub.o 4 +
+425
drivers/usb/gadget/udc/aspeed-vhub/core.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget 4 + * 5 + * core.c - Top level support 6 + * 7 + * Copyright 2017 IBM Corporation 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License, or 12 + * (at your option) any later version. 13 + */ 14 + 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/delay.h> 19 + #include <linux/ioport.h> 20 + #include <linux/slab.h> 21 + #include <linux/errno.h> 22 + #include <linux/list.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/proc_fs.h> 25 + #include <linux/prefetch.h> 26 + #include <linux/clk.h> 27 + #include <linux/usb/gadget.h> 28 + #include <linux/of.h> 29 + #include <linux/of_gpio.h> 30 + #include <linux/regmap.h> 31 + #include <linux/dma-mapping.h> 32 + 33 + #include "vhub.h" 34 + 35 + void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, 36 + int status) 37 + { 38 + bool internal = req->internal; 39 + 40 + EPVDBG(ep, "completing request @%p, status %d\n", req, status); 41 + 42 + list_del_init(&req->queue); 43 + 44 + if (req->req.status == -EINPROGRESS) 45 + req->req.status = status; 46 + 47 + if (req->req.dma) { 48 + if (!WARN_ON(!ep->dev)) 49 + usb_gadget_unmap_request(&ep->dev->gadget, 50 + &req->req, ep->epn.is_in); 51 + req->req.dma = 0; 52 + } 53 + 54 + /* 55 + * If this isn't an internal EP0 request, call the core 56 + * to call the gadget completion. 
57 + */ 58 + if (!internal) { 59 + spin_unlock(&ep->vhub->lock); 60 + usb_gadget_giveback_request(&ep->ep, &req->req); 61 + spin_lock(&ep->vhub->lock); 62 + } 63 + } 64 + 65 + void ast_vhub_nuke(struct ast_vhub_ep *ep, int status) 66 + { 67 + struct ast_vhub_req *req; 68 + 69 + EPDBG(ep, "Nuking\n"); 70 + 71 + /* Beware, lock will be dropped & req-acquired by done() */ 72 + while (!list_empty(&ep->queue)) { 73 + req = list_first_entry(&ep->queue, struct ast_vhub_req, queue); 74 + ast_vhub_done(ep, req, status); 75 + } 76 + } 77 + 78 + struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep, 79 + gfp_t gfp_flags) 80 + { 81 + struct ast_vhub_req *req; 82 + 83 + req = kzalloc(sizeof(*req), gfp_flags); 84 + if (!req) 85 + return NULL; 86 + return &req->req; 87 + } 88 + 89 + void ast_vhub_free_request(struct usb_ep *u_ep, struct usb_request *u_req) 90 + { 91 + struct ast_vhub_req *req = to_ast_req(u_req); 92 + 93 + kfree(req); 94 + } 95 + 96 + static irqreturn_t ast_vhub_irq(int irq, void *data) 97 + { 98 + struct ast_vhub *vhub = data; 99 + irqreturn_t iret = IRQ_NONE; 100 + u32 istat; 101 + 102 + /* Stale interrupt while tearing down */ 103 + if (!vhub->ep0_bufs) 104 + return IRQ_NONE; 105 + 106 + spin_lock(&vhub->lock); 107 + 108 + /* Read and ACK interrupts */ 109 + istat = readl(vhub->regs + AST_VHUB_ISR); 110 + if (!istat) 111 + goto bail; 112 + writel(istat, vhub->regs + AST_VHUB_ISR); 113 + iret = IRQ_HANDLED; 114 + 115 + UDCVDBG(vhub, "irq status=%08x, ep_acks=%08x ep_nacks=%08x\n", 116 + istat, 117 + readl(vhub->regs + AST_VHUB_EP_ACK_ISR), 118 + readl(vhub->regs + AST_VHUB_EP_NACK_ISR)); 119 + 120 + /* Handle generic EPs first */ 121 + if (istat & VHUB_IRQ_EP_POOL_ACK_STALL) { 122 + u32 i, ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR); 123 + writel(ep_acks, vhub->regs + AST_VHUB_EP_ACK_ISR); 124 + 125 + for (i = 0; ep_acks && i < AST_VHUB_NUM_GEN_EPs; i++) { 126 + u32 mask = VHUB_EP_IRQ(i); 127 + if (ep_acks & mask) { 128 + 
ast_vhub_epn_ack_irq(&vhub->epns[i]); 129 + ep_acks &= ~mask; 130 + } 131 + } 132 + } 133 + 134 + /* Handle device interrupts */ 135 + if (istat & (VHUB_IRQ_DEVICE1 | 136 + VHUB_IRQ_DEVICE2 | 137 + VHUB_IRQ_DEVICE3 | 138 + VHUB_IRQ_DEVICE4 | 139 + VHUB_IRQ_DEVICE5)) { 140 + if (istat & VHUB_IRQ_DEVICE1) 141 + ast_vhub_dev_irq(&vhub->ports[0].dev); 142 + if (istat & VHUB_IRQ_DEVICE2) 143 + ast_vhub_dev_irq(&vhub->ports[1].dev); 144 + if (istat & VHUB_IRQ_DEVICE3) 145 + ast_vhub_dev_irq(&vhub->ports[2].dev); 146 + if (istat & VHUB_IRQ_DEVICE4) 147 + ast_vhub_dev_irq(&vhub->ports[3].dev); 148 + if (istat & VHUB_IRQ_DEVICE5) 149 + ast_vhub_dev_irq(&vhub->ports[4].dev); 150 + } 151 + 152 + /* Handle top-level vHub EP0 interrupts */ 153 + if (istat & (VHUB_IRQ_HUB_EP0_OUT_ACK_STALL | 154 + VHUB_IRQ_HUB_EP0_IN_ACK_STALL | 155 + VHUB_IRQ_HUB_EP0_SETUP)) { 156 + if (istat & VHUB_IRQ_HUB_EP0_IN_ACK_STALL) 157 + ast_vhub_ep0_handle_ack(&vhub->ep0, true); 158 + if (istat & VHUB_IRQ_HUB_EP0_OUT_ACK_STALL) 159 + ast_vhub_ep0_handle_ack(&vhub->ep0, false); 160 + if (istat & VHUB_IRQ_HUB_EP0_SETUP) 161 + ast_vhub_ep0_handle_setup(&vhub->ep0); 162 + } 163 + 164 + /* Various top level bus events */ 165 + if (istat & (VHUB_IRQ_BUS_RESUME | 166 + VHUB_IRQ_BUS_SUSPEND | 167 + VHUB_IRQ_BUS_RESET)) { 168 + if (istat & VHUB_IRQ_BUS_RESUME) 169 + ast_vhub_hub_resume(vhub); 170 + if (istat & VHUB_IRQ_BUS_SUSPEND) 171 + ast_vhub_hub_suspend(vhub); 172 + if (istat & VHUB_IRQ_BUS_RESET) 173 + ast_vhub_hub_reset(vhub); 174 + } 175 + 176 + bail: 177 + spin_unlock(&vhub->lock); 178 + return iret; 179 + } 180 + 181 + void ast_vhub_init_hw(struct ast_vhub *vhub) 182 + { 183 + u32 ctrl; 184 + 185 + UDCDBG(vhub,"(Re)Starting HW ...\n"); 186 + 187 + /* Enable PHY */ 188 + ctrl = VHUB_CTRL_PHY_CLK | 189 + VHUB_CTRL_PHY_RESET_DIS; 190 + 191 + /* 192 + * We do *NOT* set the VHUB_CTRL_CLK_STOP_SUSPEND bit 193 + * to stop the logic clock during suspend because 194 + * it causes the registers to become 
inaccessible and 195 + we haven't yet figured out a good way to bring the 196 + controller back into life to issue a wakeup. 197 + */ 198 + 199 + /* 200 + * Set some ISO & split control bits according to Aspeed 201 + * recommendation 202 + * 203 + * VHUB_CTRL_ISO_RSP_CTRL: When set tells the HW to respond 204 + * with 0 bytes data packet to ISO IN endpoints when no data 205 + * is available. 206 + * 207 + * VHUB_CTRL_SPLIT_IN: This makes a SOF complete a split IN 208 + * transaction. 209 + */ 210 + ctrl |= VHUB_CTRL_ISO_RSP_CTRL | VHUB_CTRL_SPLIT_IN; 211 + writel(ctrl, vhub->regs + AST_VHUB_CTRL); 212 + udelay(1); 213 + 214 + /* Set descriptor ring size */ 215 + if (AST_VHUB_DESCS_COUNT == 256) { 216 + ctrl |= VHUB_CTRL_LONG_DESC; 217 + writel(ctrl, vhub->regs + AST_VHUB_CTRL); 218 + } else { 219 + BUILD_BUG_ON(AST_VHUB_DESCS_COUNT != 32); 220 + } 221 + 222 + /* Reset all devices */ 223 + writel(VHUB_SW_RESET_ALL, vhub->regs + AST_VHUB_SW_RESET); 224 + udelay(1); 225 + writel(0, vhub->regs + AST_VHUB_SW_RESET); 226 + 227 + /* Disable and cleanup EP ACK/NACK interrupts */ 228 + writel(0, vhub->regs + AST_VHUB_EP_ACK_IER); 229 + writel(0, vhub->regs + AST_VHUB_EP_NACK_IER); 230 + writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_ACK_ISR); 231 + writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_NACK_ISR); 232 + 233 + /* Default settings for EP0, enable HW hub EP1 */ 234 + writel(0, vhub->regs + AST_VHUB_EP0_CTRL); 235 + writel(VHUB_EP1_CTRL_RESET_TOGGLE | 236 + VHUB_EP1_CTRL_ENABLE, 237 + vhub->regs + AST_VHUB_EP1_CTRL); 238 + writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG); 239 + 240 + /* Configure EP0 DMA buffer */ 241 + writel(vhub->ep0.buf_dma, vhub->regs + AST_VHUB_EP0_DATA); 242 + 243 + /* Clear address */ 244 + writel(0, vhub->regs + AST_VHUB_CONF); 245 + 246 + /* Pullup hub (activate on host) */ 247 + if (vhub->force_usb1) 248 + ctrl |= VHUB_CTRL_FULL_SPEED_ONLY; 249 + 250 + ctrl |= VHUB_CTRL_UPSTREAM_CONNECT; 251 + writel(ctrl, vhub->regs + AST_VHUB_CTRL); 
252 + 253 + /* Enable some interrupts */ 254 + writel(VHUB_IRQ_HUB_EP0_IN_ACK_STALL | 255 + VHUB_IRQ_HUB_EP0_OUT_ACK_STALL | 256 + VHUB_IRQ_HUB_EP0_SETUP | 257 + VHUB_IRQ_EP_POOL_ACK_STALL | 258 + VHUB_IRQ_BUS_RESUME | 259 + VHUB_IRQ_BUS_SUSPEND | 260 + VHUB_IRQ_BUS_RESET, 261 + vhub->regs + AST_VHUB_IER); 262 + } 263 + 264 + static int ast_vhub_remove(struct platform_device *pdev) 265 + { 266 + struct ast_vhub *vhub = platform_get_drvdata(pdev); 267 + unsigned long flags; 268 + int i; 269 + 270 + if (!vhub || !vhub->regs) 271 + return 0; 272 + 273 + /* Remove devices */ 274 + for (i = 0; i < AST_VHUB_NUM_PORTS; i++) 275 + ast_vhub_del_dev(&vhub->ports[i].dev); 276 + 277 + spin_lock_irqsave(&vhub->lock, flags); 278 + 279 + /* Mask & ack all interrupts */ 280 + writel(0, vhub->regs + AST_VHUB_IER); 281 + writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR); 282 + 283 + /* Pull device, leave PHY enabled */ 284 + writel(VHUB_CTRL_PHY_CLK | 285 + VHUB_CTRL_PHY_RESET_DIS, 286 + vhub->regs + AST_VHUB_CTRL); 287 + 288 + if (vhub->clk) 289 + clk_disable_unprepare(vhub->clk); 290 + 291 + spin_unlock_irqrestore(&vhub->lock, flags); 292 + 293 + if (vhub->ep0_bufs) 294 + dma_free_coherent(&pdev->dev, 295 + AST_VHUB_EP0_MAX_PACKET * 296 + (AST_VHUB_NUM_PORTS + 1), 297 + vhub->ep0_bufs, 298 + vhub->ep0_bufs_dma); 299 + vhub->ep0_bufs = NULL; 300 + 301 + return 0; 302 + } 303 + 304 + static int ast_vhub_probe(struct platform_device *pdev) 305 + { 306 + enum usb_device_speed max_speed; 307 + struct ast_vhub *vhub; 308 + struct resource *res; 309 + int i, rc = 0; 310 + 311 + vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL); 312 + if (!vhub) 313 + return -ENOMEM; 314 + 315 + spin_lock_init(&vhub->lock); 316 + vhub->pdev = pdev; 317 + 318 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 319 + vhub->regs = devm_ioremap_resource(&pdev->dev, res); 320 + if (IS_ERR(vhub->regs)) { 321 + dev_err(&pdev->dev, "Failed to map resources\n"); 322 + return PTR_ERR(vhub->regs); 323 
+ } 324 + UDCDBG(vhub, "vHub@%pR mapped @%p\n", res, vhub->regs); 325 + 326 + platform_set_drvdata(pdev, vhub); 327 + 328 + vhub->clk = devm_clk_get(&pdev->dev, NULL); 329 + if (IS_ERR(vhub->clk)) { 330 + rc = PTR_ERR(vhub->clk); 331 + goto err; 332 + } 333 + rc = clk_prepare_enable(vhub->clk); 334 + if (rc) { 335 + dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", rc); 336 + goto err; 337 + } 338 + 339 + /* Check if we need to limit the HW to USB1 */ 340 + max_speed = usb_get_maximum_speed(&pdev->dev); 341 + if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH) 342 + vhub->force_usb1 = true; 343 + 344 + /* Mask & ack all interrupts before installing the handler */ 345 + writel(0, vhub->regs + AST_VHUB_IER); 346 + writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR); 347 + 348 + /* Find interrupt and install handler */ 349 + vhub->irq = platform_get_irq(pdev, 0); 350 + if (vhub->irq < 0) { 351 + dev_err(&pdev->dev, "Failed to get interrupt\n"); 352 + rc = vhub->irq; 353 + goto err; 354 + } 355 + rc = devm_request_irq(&pdev->dev, vhub->irq, ast_vhub_irq, 0, 356 + KBUILD_MODNAME, vhub); 357 + if (rc) { 358 + dev_err(&pdev->dev, "Failed to request interrupt\n"); 359 + goto err; 360 + } 361 + 362 + /* 363 + * Allocate DMA buffers for all EP0s in one chunk, 364 + * one per port and one for the vHub itself 365 + */ 366 + vhub->ep0_bufs = dma_alloc_coherent(&pdev->dev, 367 + AST_VHUB_EP0_MAX_PACKET * 368 + (AST_VHUB_NUM_PORTS + 1), 369 + &vhub->ep0_bufs_dma, GFP_KERNEL); 370 + if (!vhub->ep0_bufs) { 371 + dev_err(&pdev->dev, "Failed to allocate EP0 DMA buffers\n"); 372 + rc = -ENOMEM; 373 + goto err; 374 + } 375 + UDCVDBG(vhub, "EP0 DMA buffers @%p (DMA 0x%08x)\n", 376 + vhub->ep0_bufs, (u32)vhub->ep0_bufs_dma); 377 + 378 + /* Init vHub EP0 */ 379 + ast_vhub_init_ep0(vhub, &vhub->ep0, NULL); 380 + 381 + /* Init devices */ 382 + for (i = 0; i < AST_VHUB_NUM_PORTS && rc == 0; i++) 383 + rc = ast_vhub_init_dev(vhub, i); 384 + if (rc) 385 + goto err; 386 + 
387 + /* Init hub emulation */ 388 + ast_vhub_init_hub(vhub); 389 + 390 + /* Initialize HW */ 391 + ast_vhub_init_hw(vhub); 392 + 393 + dev_info(&pdev->dev, "Initialized virtual hub in USB%d mode\n", 394 + vhub->force_usb1 ? 1 : 2); 395 + 396 + return 0; 397 + err: 398 + ast_vhub_remove(pdev); 399 + return rc; 400 + } 401 + 402 + static const struct of_device_id ast_vhub_dt_ids[] = { 403 + { 404 + .compatible = "aspeed,ast2400-usb-vhub", 405 + }, 406 + { 407 + .compatible = "aspeed,ast2500-usb-vhub", 408 + }, 409 + { } 410 + }; 411 + MODULE_DEVICE_TABLE(of, ast_vhub_dt_ids); 412 + 413 + static struct platform_driver ast_vhub_driver = { 414 + .probe = ast_vhub_probe, 415 + .remove = ast_vhub_remove, 416 + .driver = { 417 + .name = KBUILD_MODNAME, 418 + .of_match_table = ast_vhub_dt_ids, 419 + }, 420 + }; 421 + module_platform_driver(ast_vhub_driver); 422 + 423 + MODULE_DESCRIPTION("Aspeed vHub udc driver"); 424 + MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); 425 + MODULE_LICENSE("GPL");
+589
drivers/usb/gadget/udc/aspeed-vhub/dev.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget 4 + * 5 + * dev.c - Individual device/gadget management (ie, a port = a gadget) 6 + * 7 + * Copyright 2017 IBM Corporation 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License, or 12 + * (at your option) any later version. 13 + */ 14 + 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/delay.h> 19 + #include <linux/ioport.h> 20 + #include <linux/slab.h> 21 + #include <linux/errno.h> 22 + #include <linux/list.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/proc_fs.h> 25 + #include <linux/prefetch.h> 26 + #include <linux/clk.h> 27 + #include <linux/usb/gadget.h> 28 + #include <linux/of.h> 29 + #include <linux/of_gpio.h> 30 + #include <linux/regmap.h> 31 + #include <linux/dma-mapping.h> 32 + #include <linux/usb.h> 33 + #include <linux/usb/hcd.h> 34 + 35 + #include "vhub.h" 36 + 37 + void ast_vhub_dev_irq(struct ast_vhub_dev *d) 38 + { 39 + u32 istat = readl(d->regs + AST_VHUB_DEV_ISR); 40 + 41 + writel(istat, d->regs + AST_VHUB_DEV_ISR); 42 + 43 + if (istat & VHUV_DEV_IRQ_EP0_IN_ACK_STALL) 44 + ast_vhub_ep0_handle_ack(&d->ep0, true); 45 + if (istat & VHUV_DEV_IRQ_EP0_OUT_ACK_STALL) 46 + ast_vhub_ep0_handle_ack(&d->ep0, false); 47 + if (istat & VHUV_DEV_IRQ_EP0_SETUP) 48 + ast_vhub_ep0_handle_setup(&d->ep0); 49 + } 50 + 51 + static void ast_vhub_dev_enable(struct ast_vhub_dev *d) 52 + { 53 + u32 reg, hmsk; 54 + 55 + if (d->enabled) 56 + return; 57 + 58 + /* Enable device and its EP0 interrupts */ 59 + reg = VHUB_DEV_EN_ENABLE_PORT | 60 + VHUB_DEV_EN_EP0_IN_ACK_IRQEN | 61 + VHUB_DEV_EN_EP0_OUT_ACK_IRQEN | 62 + VHUB_DEV_EN_EP0_SETUP_IRQEN; 63 + if (d->gadget.speed == USB_SPEED_HIGH) 64 + reg |= 
VHUB_DEV_EN_SPEED_SEL_HIGH; 65 + writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL); 66 + 67 + /* Enable device interrupt in the hub as well */ 68 + hmsk = VHUB_IRQ_DEVICE1 << d->index; 69 + reg = readl(d->vhub->regs + AST_VHUB_IER); 70 + reg |= hmsk; 71 + writel(reg, d->vhub->regs + AST_VHUB_IER); 72 + 73 + /* Set EP0 DMA buffer address */ 74 + writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA); 75 + 76 + d->enabled = true; 77 + } 78 + 79 + static void ast_vhub_dev_disable(struct ast_vhub_dev *d) 80 + { 81 + u32 reg, hmsk; 82 + 83 + if (!d->enabled) 84 + return; 85 + 86 + /* Disable device interrupt in the hub */ 87 + hmsk = VHUB_IRQ_DEVICE1 << d->index; 88 + reg = readl(d->vhub->regs + AST_VHUB_IER); 89 + reg &= ~hmsk; 90 + writel(reg, d->vhub->regs + AST_VHUB_IER); 91 + 92 + /* Then disable device */ 93 + writel(0, d->regs + AST_VHUB_DEV_EN_CTRL); 94 + d->gadget.speed = USB_SPEED_UNKNOWN; 95 + d->enabled = false; 96 + d->suspended = false; 97 + } 98 + 99 + static int ast_vhub_dev_feature(struct ast_vhub_dev *d, 100 + u16 wIndex, u16 wValue, 101 + bool is_set) 102 + { 103 + DDBG(d, "%s_FEATURE(dev val=%02x)\n", 104 + is_set ? "SET" : "CLEAR", wValue); 105 + 106 + if (wValue != USB_DEVICE_REMOTE_WAKEUP) 107 + return std_req_driver; 108 + 109 + d->wakeup_en = is_set; 110 + 111 + return std_req_complete; 112 + } 113 + 114 + static int ast_vhub_ep_feature(struct ast_vhub_dev *d, 115 + u16 wIndex, u16 wValue, bool is_set) 116 + { 117 + struct ast_vhub_ep *ep; 118 + int ep_num; 119 + 120 + ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK; 121 + DDBG(d, "%s_FEATURE(ep%d val=%02x)\n", 122 + is_set ? 
"SET" : "CLEAR", ep_num, wValue); 123 + if (ep_num == 0) 124 + return std_req_complete; 125 + if (ep_num >= AST_VHUB_NUM_GEN_EPs || !d->epns[ep_num - 1]) 126 + return std_req_stall; 127 + if (wValue != USB_ENDPOINT_HALT) 128 + return std_req_driver; 129 + 130 + ep = d->epns[ep_num - 1]; 131 + if (WARN_ON(!ep)) 132 + return std_req_stall; 133 + 134 + if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso || 135 + ep->epn.is_in != !!(wIndex & USB_DIR_IN)) 136 + return std_req_stall; 137 + 138 + DDBG(d, "%s stall on EP %d\n", 139 + is_set ? "setting" : "clearing", ep_num); 140 + ep->epn.stalled = is_set; 141 + ast_vhub_update_epn_stall(ep); 142 + 143 + return std_req_complete; 144 + } 145 + 146 + static int ast_vhub_dev_status(struct ast_vhub_dev *d, 147 + u16 wIndex, u16 wValue) 148 + { 149 + u8 st0; 150 + 151 + DDBG(d, "GET_STATUS(dev)\n"); 152 + 153 + st0 = d->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED; 154 + if (d->wakeup_en) 155 + st0 |= 1 << USB_DEVICE_REMOTE_WAKEUP; 156 + 157 + return ast_vhub_simple_reply(&d->ep0, st0, 0); 158 + } 159 + 160 + static int ast_vhub_ep_status(struct ast_vhub_dev *d, 161 + u16 wIndex, u16 wValue) 162 + { 163 + int ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK; 164 + struct ast_vhub_ep *ep; 165 + u8 st0 = 0; 166 + 167 + DDBG(d, "GET_STATUS(ep%d)\n", ep_num); 168 + 169 + if (ep_num >= AST_VHUB_NUM_GEN_EPs) 170 + return std_req_stall; 171 + if (ep_num != 0) { 172 + ep = d->epns[ep_num - 1]; 173 + if (!ep) 174 + return std_req_stall; 175 + if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso || 176 + ep->epn.is_in != !!(wIndex & USB_DIR_IN)) 177 + return std_req_stall; 178 + if (ep->epn.stalled) 179 + st0 |= 1 << USB_ENDPOINT_HALT; 180 + } 181 + 182 + return ast_vhub_simple_reply(&d->ep0, st0, 0); 183 + } 184 + 185 + static void ast_vhub_dev_set_address(struct ast_vhub_dev *d, u8 addr) 186 + { 187 + u32 reg; 188 + 189 + DDBG(d, "SET_ADDRESS: Got address %x\n", addr); 190 + 191 + reg = readl(d->regs + AST_VHUB_DEV_EN_CTRL); 192 + 
reg &= ~VHUB_DEV_EN_ADDR_MASK; 193 + reg |= VHUB_DEV_EN_SET_ADDR(addr); 194 + writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL); 195 + } 196 + 197 + int ast_vhub_std_dev_request(struct ast_vhub_ep *ep, 198 + struct usb_ctrlrequest *crq) 199 + { 200 + struct ast_vhub_dev *d = ep->dev; 201 + u16 wValue, wIndex; 202 + 203 + /* No driver, we shouldn't be enabled ... */ 204 + if (!d->driver || !d->enabled || d->suspended) { 205 + EPDBG(ep, 206 + "Device is wrong state driver=%p enabled=%d" 207 + " suspended=%d\n", 208 + d->driver, d->enabled, d->suspended); 209 + return std_req_stall; 210 + } 211 + 212 + /* First packet, grab speed */ 213 + if (d->gadget.speed == USB_SPEED_UNKNOWN) { 214 + d->gadget.speed = ep->vhub->speed; 215 + if (d->gadget.speed > d->driver->max_speed) 216 + d->gadget.speed = d->driver->max_speed; 217 + DDBG(d, "fist packet, captured speed %d\n", 218 + d->gadget.speed); 219 + } 220 + 221 + wValue = le16_to_cpu(crq->wValue); 222 + wIndex = le16_to_cpu(crq->wIndex); 223 + 224 + switch ((crq->bRequestType << 8) | crq->bRequest) { 225 + /* SET_ADDRESS */ 226 + case DeviceOutRequest | USB_REQ_SET_ADDRESS: 227 + ast_vhub_dev_set_address(d, wValue); 228 + return std_req_complete; 229 + 230 + /* GET_STATUS */ 231 + case DeviceRequest | USB_REQ_GET_STATUS: 232 + return ast_vhub_dev_status(d, wIndex, wValue); 233 + case InterfaceRequest | USB_REQ_GET_STATUS: 234 + return ast_vhub_simple_reply(ep, 0, 0); 235 + case EndpointRequest | USB_REQ_GET_STATUS: 236 + return ast_vhub_ep_status(d, wIndex, wValue); 237 + 238 + /* SET/CLEAR_FEATURE */ 239 + case DeviceOutRequest | USB_REQ_SET_FEATURE: 240 + return ast_vhub_dev_feature(d, wIndex, wValue, true); 241 + case DeviceOutRequest | USB_REQ_CLEAR_FEATURE: 242 + return ast_vhub_dev_feature(d, wIndex, wValue, false); 243 + case EndpointOutRequest | USB_REQ_SET_FEATURE: 244 + return ast_vhub_ep_feature(d, wIndex, wValue, true); 245 + case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: 246 + return ast_vhub_ep_feature(d, wIndex, 
wValue, false); 247 + } 248 + return std_req_driver; 249 + } 250 + 251 + static int ast_vhub_udc_wakeup(struct usb_gadget* gadget) 252 + { 253 + struct ast_vhub_dev *d = to_ast_dev(gadget); 254 + unsigned long flags; 255 + int rc = -EINVAL; 256 + 257 + spin_lock_irqsave(&d->vhub->lock, flags); 258 + if (!d->wakeup_en) 259 + goto err; 260 + 261 + DDBG(d, "Device initiated wakeup\n"); 262 + 263 + /* Wakeup the host */ 264 + ast_vhub_hub_wake_all(d->vhub); 265 + rc = 0; 266 + err: 267 + spin_unlock_irqrestore(&d->vhub->lock, flags); 268 + return rc; 269 + } 270 + 271 + static int ast_vhub_udc_get_frame(struct usb_gadget* gadget) 272 + { 273 + struct ast_vhub_dev *d = to_ast_dev(gadget); 274 + 275 + return (readl(d->vhub->regs + AST_VHUB_USBSTS) >> 16) & 0x7ff; 276 + } 277 + 278 + static void ast_vhub_dev_nuke(struct ast_vhub_dev *d) 279 + { 280 + unsigned int i; 281 + 282 + for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) { 283 + if (!d->epns[i]) 284 + continue; 285 + ast_vhub_nuke(d->epns[i], -ESHUTDOWN); 286 + } 287 + } 288 + 289 + static int ast_vhub_udc_pullup(struct usb_gadget* gadget, int on) 290 + { 291 + struct ast_vhub_dev *d = to_ast_dev(gadget); 292 + unsigned long flags; 293 + 294 + spin_lock_irqsave(&d->vhub->lock, flags); 295 + 296 + DDBG(d, "pullup(%d)\n", on); 297 + 298 + /* Mark disconnected in the hub */ 299 + ast_vhub_device_connect(d->vhub, d->index, on); 300 + 301 + /* 302 + * If enabled, nuke all requests if any (there shouldn't be) 303 + * and disable the port. This will clear the address too. 
304 + */ 305 + if (d->enabled) { 306 + ast_vhub_dev_nuke(d); 307 + ast_vhub_dev_disable(d); 308 + } 309 + 310 + spin_unlock_irqrestore(&d->vhub->lock, flags); 311 + 312 + return 0; 313 + } 314 + 315 + static int ast_vhub_udc_start(struct usb_gadget *gadget, 316 + struct usb_gadget_driver *driver) 317 + { 318 + struct ast_vhub_dev *d = to_ast_dev(gadget); 319 + unsigned long flags; 320 + 321 + spin_lock_irqsave(&d->vhub->lock, flags); 322 + 323 + DDBG(d, "start\n"); 324 + 325 + /* We don't do much more until the hub enables us */ 326 + d->driver = driver; 327 + d->gadget.is_selfpowered = 1; 328 + 329 + spin_unlock_irqrestore(&d->vhub->lock, flags); 330 + 331 + return 0; 332 + } 333 + 334 + static struct usb_ep *ast_vhub_udc_match_ep(struct usb_gadget *gadget, 335 + struct usb_endpoint_descriptor *desc, 336 + struct usb_ss_ep_comp_descriptor *ss) 337 + { 338 + struct ast_vhub_dev *d = to_ast_dev(gadget); 339 + struct ast_vhub_ep *ep; 340 + struct usb_ep *u_ep; 341 + unsigned int max, addr, i; 342 + 343 + DDBG(d, "Match EP type %d\n", usb_endpoint_type(desc)); 344 + 345 + /* 346 + * First we need to look for an existing unclaimed EP as another 347 + * configuration may have already associated a bunch of EPs with 348 + * this gadget. This duplicates the code in usb_ep_autoconfig_ss() 349 + * unfortunately. 350 + */ 351 + list_for_each_entry(u_ep, &gadget->ep_list, ep_list) { 352 + if (usb_gadget_ep_match_desc(gadget, u_ep, desc, ss)) { 353 + DDBG(d, " -> using existing EP%d\n", 354 + to_ast_ep(u_ep)->d_idx); 355 + return u_ep; 356 + } 357 + } 358 + 359 + /* 360 + * We didn't find one, we need to grab one from the pool. 
361 + * 362 + * First let's do some sanity checking 363 + */ 364 + switch(usb_endpoint_type(desc)) { 365 + case USB_ENDPOINT_XFER_CONTROL: 366 + /* Only EP0 can be a control endpoint */ 367 + return NULL; 368 + case USB_ENDPOINT_XFER_ISOC: 369 + /* ISO: limit 1023 bytes full speed, 1024 high/super speed */ 370 + if (gadget_is_dualspeed(gadget)) 371 + max = 1024; 372 + else 373 + max = 1023; 374 + break; 375 + case USB_ENDPOINT_XFER_BULK: 376 + if (gadget_is_dualspeed(gadget)) 377 + max = 512; 378 + else 379 + max = 64; 380 + break; 381 + case USB_ENDPOINT_XFER_INT: 382 + if (gadget_is_dualspeed(gadget)) 383 + max = 1024; 384 + else 385 + max = 64; 386 + break; 387 + } 388 + if (usb_endpoint_maxp(desc) > max) 389 + return NULL; 390 + 391 + /* 392 + * Find a free EP address for that device. We can't 393 + * let the generic code assign these as it would 394 + * create overlapping numbers for IN and OUT which 395 + * we don't support, so also create a suitable name 396 + * that will allow the generic code to use our 397 + * assigned address. 
398 + */ 399 + for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) 400 + if (d->epns[i] == NULL) 401 + break; 402 + if (i >= AST_VHUB_NUM_GEN_EPs) 403 + return NULL; 404 + addr = i + 1; 405 + 406 + /* 407 + * Now grab an EP from the shared pool and associate 408 + * it with our device 409 + */ 410 + ep = ast_vhub_alloc_epn(d, addr); 411 + if (!ep) 412 + return NULL; 413 + DDBG(d, "Allocated epn#%d for port EP%d\n", 414 + ep->epn.g_idx, addr); 415 + 416 + return &ep->ep; 417 + } 418 + 419 + static int ast_vhub_udc_stop(struct usb_gadget *gadget) 420 + { 421 + struct ast_vhub_dev *d = to_ast_dev(gadget); 422 + unsigned long flags; 423 + 424 + spin_lock_irqsave(&d->vhub->lock, flags); 425 + 426 + DDBG(d, "stop\n"); 427 + 428 + d->driver = NULL; 429 + d->gadget.speed = USB_SPEED_UNKNOWN; 430 + 431 + ast_vhub_dev_nuke(d); 432 + 433 + if (d->enabled) 434 + ast_vhub_dev_disable(d); 435 + 436 + spin_unlock_irqrestore(&d->vhub->lock, flags); 437 + 438 + return 0; 439 + } 440 + 441 + static struct usb_gadget_ops ast_vhub_udc_ops = { 442 + .get_frame = ast_vhub_udc_get_frame, 443 + .wakeup = ast_vhub_udc_wakeup, 444 + .pullup = ast_vhub_udc_pullup, 445 + .udc_start = ast_vhub_udc_start, 446 + .udc_stop = ast_vhub_udc_stop, 447 + .match_ep = ast_vhub_udc_match_ep, 448 + }; 449 + 450 + void ast_vhub_dev_suspend(struct ast_vhub_dev *d) 451 + { 452 + d->suspended = true; 453 + if (d->driver) { 454 + spin_unlock(&d->vhub->lock); 455 + d->driver->suspend(&d->gadget); 456 + spin_lock(&d->vhub->lock); 457 + } 458 + } 459 + 460 + void ast_vhub_dev_resume(struct ast_vhub_dev *d) 461 + { 462 + d->suspended = false; 463 + if (d->driver) { 464 + spin_unlock(&d->vhub->lock); 465 + d->driver->resume(&d->gadget); 466 + spin_lock(&d->vhub->lock); 467 + } 468 + } 469 + 470 + void ast_vhub_dev_reset(struct ast_vhub_dev *d) 471 + { 472 + /* 473 + * If speed is not set, we enable the port. If it is, 474 + * send reset to the gadget and reset "speed". 
475 + * 476 + * Speed is an indication that we have got the first 477 + * setup packet to the device. 478 + */ 479 + if (d->gadget.speed == USB_SPEED_UNKNOWN && !d->enabled) { 480 + DDBG(d, "Reset at unknown speed of disabled device, enabling...\n"); 481 + ast_vhub_dev_enable(d); 482 + d->suspended = false; 483 + } 484 + if (d->gadget.speed != USB_SPEED_UNKNOWN && d->driver) { 485 + unsigned int i; 486 + 487 + DDBG(d, "Reset at known speed of bound device, resetting...\n"); 488 + spin_unlock(&d->vhub->lock); 489 + d->driver->reset(&d->gadget); 490 + spin_lock(&d->vhub->lock); 491 + 492 + /* 493 + * Disable/re-enable HW, this will clear the address 494 + * and speed setting. 495 + */ 496 + ast_vhub_dev_disable(d); 497 + ast_vhub_dev_enable(d); 498 + 499 + /* Clear stall on all EPs */ 500 + for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) { 501 + struct ast_vhub_ep *ep = d->epns[i]; 502 + 503 + if (ep && ep->epn.stalled) { 504 + ep->epn.stalled = false; 505 + ast_vhub_update_epn_stall(ep); 506 + } 507 + } 508 + 509 + /* Additional cleanups */ 510 + d->wakeup_en = false; 511 + d->suspended = false; 512 + } 513 + } 514 + 515 + void ast_vhub_del_dev(struct ast_vhub_dev *d) 516 + { 517 + unsigned long flags; 518 + 519 + spin_lock_irqsave(&d->vhub->lock, flags); 520 + if (!d->registered) { 521 + spin_unlock_irqrestore(&d->vhub->lock, flags); 522 + return; 523 + } 524 + d->registered = false; 525 + spin_unlock_irqrestore(&d->vhub->lock, flags); 526 + 527 + usb_del_gadget_udc(&d->gadget); 528 + device_unregister(d->port_dev); 529 + } 530 + 531 + static void ast_vhub_dev_release(struct device *dev) 532 + { 533 + kfree(dev); 534 + } 535 + 536 + int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx) 537 + { 538 + struct ast_vhub_dev *d = &vhub->ports[idx].dev; 539 + struct device *parent = &vhub->pdev->dev; 540 + int rc; 541 + 542 + d->vhub = vhub; 543 + d->index = idx; 544 + d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1); 545 + d->regs = vhub->regs + 0x100 + 
0x10 * idx; 546 + 547 + ast_vhub_init_ep0(vhub, &d->ep0, d); 548 + 549 + /* 550 + * The UDC core really needs us to have separate and uniquely 551 + * named "parent" devices for each port so we create a sub device 552 + * here for that purpose 553 + */ 554 + d->port_dev = kzalloc(sizeof(struct device), GFP_KERNEL); 555 + if (!d->port_dev) 556 + return -ENOMEM; 557 + device_initialize(d->port_dev); 558 + d->port_dev->release = ast_vhub_dev_release; 559 + d->port_dev->parent = parent; 560 + dev_set_name(d->port_dev, "%s:p%d", dev_name(parent), idx + 1); 561 + rc = device_add(d->port_dev); 562 + if (rc) 563 + goto fail_add; 564 + 565 + /* Populate gadget */ 566 + INIT_LIST_HEAD(&d->gadget.ep_list); 567 + d->gadget.ops = &ast_vhub_udc_ops; 568 + d->gadget.ep0 = &d->ep0.ep; 569 + d->gadget.name = KBUILD_MODNAME; 570 + if (vhub->force_usb1) 571 + d->gadget.max_speed = USB_SPEED_FULL; 572 + else 573 + d->gadget.max_speed = USB_SPEED_HIGH; 574 + d->gadget.speed = USB_SPEED_UNKNOWN; 575 + d->gadget.dev.of_node = vhub->pdev->dev.of_node; 576 + 577 + rc = usb_add_gadget_udc(d->port_dev, &d->gadget); 578 + if (rc != 0) 579 + goto fail_udc; 580 + d->registered = true; 581 + 582 + return 0; 583 + fail_udc: 584 + device_del(d->port_dev); 585 + fail_add: 586 + put_device(d->port_dev); 587 + 588 + return rc; 589 + }
+486
drivers/usb/gadget/udc/aspeed-vhub/ep0.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

/* Queue an IN reply of up to AST_VHUB_EP0_MAX_PACKET bytes on EP0,
 * using the endpoint's internal request. Called with the vhub lock
 * held; it is dropped around the queue operation. Returns a
 * std_req_* code for the SETUP dispatcher. */
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}

/* Variadic helper: build a small reply byte-by-byte directly in the
 * EP0 hardware buffer, then send it via ast_vhub_reply(). */
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}

/* Dispatch a SETUP packet: handled internally for the vHub itself
 * (and standard device requests), otherwise forwarded to the bound
 * gadget driver. Called with the vhub lock held. */
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);

	/* Check our state, cancel pending requests if needed */
	if (ep->ep0.state != ep0_state_token) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, 0);
		goto stall;
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);

	/* Act upon result */
	switch(std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		/* Lock dropped around the driver's setup() callback */
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request !\n");
	}
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}


/* Send the next chunk (at most maxpacket) of an IN data phase, or
 * complete the request and arm the OUT status phase when done. */
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}

static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	/*
	 * NOTE(review): every other EP0 control/status access in this file
	 * uses ep->ep0.ctlstat directly; the extra AST_VHUB_EP0_CTRL offset
	 * here looks inconsistent — confirm against the register map.
	 */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
}

/* Consume 'len' bytes of an OUT data phase from the EP0 buffer into
 * the request; complete (and arm IN status) on short packet or when
 * the request is full, otherwise re-prime the receiver. */
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}
	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else
		ast_vhub_ep0_rx_prime(ep);
}

/* EP0 interrupt handler: advance the control transfer state machine
 * on an IN or OUT ack. Called with the vhub lock held. */
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch(ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			dev_warn(dev, "irq state mismatch");
			stall = true;
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
	}

	/* Reset to token state */
	ep->ep0.state = ep0_state_token;
	if (stall)
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
}

/* usb_ep_ops.queue for EP0: accepts at most one request at a time and
 * kicks the appropriate data/status phase immediately. */
static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && (!ep->dev->enabled || ep->dev->suspended))
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) || ep->ep0.state == ep0_state_token) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

/* usb_ep_ops.dequeue for EP0: cancel the (single) queued request and
 * stall the endpoint to clean up the hardware state. */
static int ast_vhub_ep0_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours ? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}


static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue		= ast_vhub_ep0_queue,
	.dequeue	= ast_vhub_ep0_dequeue,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};

/* Initialize an EP0 structure, either for the vhub itself (dev ==
 * NULL) or for one of the downstream port devices. */
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		/* Slot 0 of the SETUP/buffer arrays belongs to the vhub,
		 * device "index" uses slot index + 1 */
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}
+843
drivers/usb/gadget/udc/aspeed-vhub/epn.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

#define EXTRA_CHECKS

/* Debug-only consistency check: logs via EPDBG when 'expr' is false */
#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)	\
	do { \
		if (!(expr)) EPDBG(ep, "CHECK:" fmt); \
	} while(0)
#else
#define CHECK(ep, expr, fmt...)	do { } while(0)
#endif

/* Single-buffer (non-descriptor) mode: start DMA for the next chunk
 * of a request, bouncing through the EP buffer when the request was
 * not DMA-mapped. */
static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* There should be no DMA ongoing */
	WARN_ON(req->active);

	/* Calculate next chunk size */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);

	/* If DMA unavailable, using staging EP buffer */
	/* (ast_vhub_epn_queue() sets req.dma to 0 when it didn't map) */
	if (!req->req.dma) {

		/* For IN transfers, copy data over first */
		if (ep->epn.is_in)
			memcpy(ep->buf, req->req.buf + act, chunk);
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);

	/* Start DMA */
	req->active = true;
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}

/* Single-buffer mode ack handler: account the finished chunk,
 * complete the request if it was the last one, then kick the next
 * chunk/request. Called with the vhub lock held. */
static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	u32 stat;

	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req, req ? req->active : 0);

	/* In absence of a request, bail out, must have been dequeued */
	if (!req)
		return;

	/*
	 * Request not active, move on to processing queue, active request
	 * was probably dequeued
	 */
	if (!req->active)
		goto next_chunk;

	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0 !\n");
		return;
	}

	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);

	/* If not using DMA, copy data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);

	/* Adjust size */
	req->req.actual += len;

	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;

	/* That's it ? complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);

		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}

 next_chunk:
	ast_vhub_epn_kick(ep, req);
}

static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	/*
	 * d_next == d_last means descriptor list empty to HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
	 * in the list
	 */
	return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
		(AST_VHUB_DESCS_COUNT - 1);
}

/* Descriptor mode: populate as many ring descriptors as possible for
 * the request (up to chunk_max bytes each) and hand them to HW.
 * act_count tracks bytes queued into descriptors, which can run
 * ahead of req.actual (bytes confirmed completed). */
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	unsigned int act = req->act_count;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* Mark request active if not already */
	req->active = true;

	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;

	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;

		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet ? Because of having up to 8
			 * packets in a descriptor we can't just compare "chunk"
			 * with ep.maxpacket. We have to see if it's a multiple
			 * of it to know if we have to send a zero packet.
			 * Sadly that involves a modulo which is a bit expensive
			 * but probably still better than not doing it.
			 */
			if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}

		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));

		/* Populate descriptor */
		desc->w0 = cpu_to_le32(req->req.dma + act);

		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

		/* Account packet */
		req->act_count = act = act + chunk;
	}

	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}

/* Descriptor mode ack handler: walk descriptors the HW has consumed
 * (between our d_last and the HW read pointer), complete the request
 * on its last descriptor, then refill the ring. */
static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;

	/* Read EP status, workaround HW race */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while(stat != stat1);

	/* Extract RPTR */
	d_last = VHUB_EP_DMA_RPTR(stat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);

	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;

		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);

		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;

		/* Adjust size */
		req->req.actual += len;

		/* Is that the last chunk ? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);

		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}

	/* More work ? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}

/* Per-EP interrupt entry: dispatch to the descriptor-mode or
 * single-buffer-mode ack handler. */
void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
	if (ep->epn.desc_mode)
		ast_vhub_epn_handle_ack_desc(ep);
	else
		ast_vhub_epn_handle_ack(ep);
}

/* usb_ep_ops.queue for generic endpoints: validate, optionally
 * DMA-map, enqueue, and kick the HW if the queue was idle. */
static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;

	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Endpoint enabled ? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled || ep->dev->suspended) {
		EPDBG(ep,"Enqueing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}

	/* Map request for DMA if possible. For now, the rule for DMA is
	 * that:
	 *
	 *  * For single stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to a 8 bytes boundary (HW requirement)
	 *   - For a OUT endpoint, the request size is a multiple of the EP
	 *     packet size (otherwise the controller will DMA past the end
	 *     of the buffer if the host is sending a too long packet).
	 *
	 *  * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
	 */
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
					    ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else
		/* dma == 0 signals "use the bounce buffer" to the kick path */
		u_req->dma = 0;

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;
	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

/* Halt any in-flight DMA on the endpoint, polling (up to ~1ms) for
 * the DMA engine to reach an idle state. */
static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;

	/* Stop DMA activity */
	writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/*
Restart the endpoint */ 434 + if (ep->epn.desc_mode) { 435 + /* 436 + * Take out descriptors by resetting the DMA read 437 + * pointer to be equal to the CPU write pointer. 438 + * 439 + * Note: If we ever support creating descriptors for 440 + * requests that aren't the head of the queue, we 441 + * may have to do something more complex here, 442 + * especially if the request being taken out is 443 + * not the current head descriptors. 444 + */ 445 + reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) | 446 + VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next); 447 + writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS); 448 + 449 + /* Then turn it back on */ 450 + writel(ep->epn.dma_conf, 451 + ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); 452 + } else { 453 + /* Single mode: just turn it back on */ 454 + writel(ep->epn.dma_conf, 455 + ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); 456 + } 457 + } 458 + 459 + static int ast_vhub_epn_dequeue(struct usb_ep* u_ep, struct usb_request *u_req) 460 + { 461 + struct ast_vhub_ep *ep = to_ast_ep(u_ep); 462 + struct ast_vhub *vhub = ep->vhub; 463 + struct ast_vhub_req *req; 464 + unsigned long flags; 465 + int rc = -EINVAL; 466 + 467 + spin_lock_irqsave(&vhub->lock, flags); 468 + 469 + /* Make sure it's actually queued on this endpoint */ 470 + list_for_each_entry (req, &ep->queue, queue) { 471 + if (&req->req == u_req) 472 + break; 473 + } 474 + 475 + if (&req->req == u_req) { 476 + EPVDBG(ep, "dequeue req @%p active=%d\n", 477 + req, req->active); 478 + if (req->active) 479 + ast_vhub_stop_active_req(ep, true); 480 + ast_vhub_done(ep, req, -ECONNRESET); 481 + rc = 0; 482 + } 483 + 484 + spin_unlock_irqrestore(&vhub->lock, flags); 485 + return rc; 486 + } 487 + 488 + void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep) 489 + { 490 + u32 reg; 491 + 492 + if (WARN_ON(ep->d_idx == 0)) 493 + return; 494 + reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG); 495 + if (ep->epn.stalled || ep->epn.wedged) 496 + reg |= VHUB_EP_CFG_STALL_CTRL; 497 + else 498 + reg &= 
~VHUB_EP_CFG_STALL_CTRL; 499 + writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG); 500 + 501 + if (!ep->epn.stalled && !ep->epn.wedged) 502 + writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx), 503 + ep->vhub->regs + AST_VHUB_EP_TOGGLE); 504 + } 505 + 506 + static int ast_vhub_set_halt_and_wedge(struct usb_ep* u_ep, bool halt, 507 + bool wedge) 508 + { 509 + struct ast_vhub_ep *ep = to_ast_ep(u_ep); 510 + struct ast_vhub *vhub = ep->vhub; 511 + unsigned long flags; 512 + 513 + EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge); 514 + 515 + if (!u_ep || !u_ep->desc) 516 + return -EINVAL; 517 + if (ep->d_idx == 0) 518 + return 0; 519 + if (ep->epn.is_iso) 520 + return -EOPNOTSUPP; 521 + 522 + spin_lock_irqsave(&vhub->lock, flags); 523 + 524 + /* Fail with still-busy IN endpoints */ 525 + if (halt && ep->epn.is_in && !list_empty(&ep->queue)) { 526 + spin_unlock_irqrestore(&vhub->lock, flags); 527 + return -EAGAIN; 528 + } 529 + ep->epn.stalled = halt; 530 + ep->epn.wedged = wedge; 531 + ast_vhub_update_epn_stall(ep); 532 + 533 + spin_unlock_irqrestore(&vhub->lock, flags); 534 + 535 + return 0; 536 + } 537 + 538 + static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value) 539 + { 540 + return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false); 541 + } 542 + 543 + static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep) 544 + { 545 + return ast_vhub_set_halt_and_wedge(u_ep, true, true); 546 + } 547 + 548 + static int ast_vhub_epn_disable(struct usb_ep* u_ep) 549 + { 550 + struct ast_vhub_ep *ep = to_ast_ep(u_ep); 551 + struct ast_vhub *vhub = ep->vhub; 552 + unsigned long flags; 553 + u32 imask, ep_ier; 554 + 555 + EPDBG(ep, "Disabling !\n"); 556 + 557 + spin_lock_irqsave(&vhub->lock, flags); 558 + 559 + ep->epn.enabled = false; 560 + 561 + /* Stop active DMA if any */ 562 + ast_vhub_stop_active_req(ep, false); 563 + 564 + /* Disable endpoint */ 565 + writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG); 566 + 567 + /* Disable ACK interrupt */ 568 + imask = 
VHUB_EP_IRQ(ep->epn.g_idx); 569 + ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER); 570 + ep_ier &= ~imask; 571 + writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER); 572 + writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR); 573 + 574 + /* Nuke all pending requests */ 575 + ast_vhub_nuke(ep, -ESHUTDOWN); 576 + 577 + /* No more descriptor associated with request */ 578 + ep->ep.desc = NULL; 579 + 580 + spin_unlock_irqrestore(&vhub->lock, flags); 581 + 582 + return 0; 583 + } 584 + 585 + static int ast_vhub_epn_enable(struct usb_ep* u_ep, 586 + const struct usb_endpoint_descriptor *desc) 587 + { 588 + static const char *ep_type_string[] __maybe_unused = { "ctrl", 589 + "isoc", 590 + "bulk", 591 + "intr" }; 592 + struct ast_vhub_ep *ep = to_ast_ep(u_ep); 593 + struct ast_vhub_dev *dev; 594 + struct ast_vhub *vhub; 595 + u16 maxpacket, type; 596 + unsigned long flags; 597 + u32 ep_conf, ep_ier, imask; 598 + 599 + /* Check arguments */ 600 + if (!u_ep || !desc) 601 + return -EINVAL; 602 + 603 + maxpacket = usb_endpoint_maxp(desc); 604 + if (!ep->d_idx || !ep->dev || 605 + desc->bDescriptorType != USB_DT_ENDPOINT || 606 + maxpacket == 0 || maxpacket > ep->ep.maxpacket) { 607 + EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n", 608 + ep->d_idx, ep->dev, desc->bDescriptorType, 609 + maxpacket, ep->ep.maxpacket); 610 + return -EINVAL; 611 + } 612 + if (ep->d_idx != usb_endpoint_num(desc)) { 613 + EPDBG(ep, "EP number mismatch !\n"); 614 + return -EINVAL; 615 + } 616 + 617 + if (ep->epn.enabled) { 618 + EPDBG(ep, "Already enabled\n"); 619 + return -EBUSY; 620 + } 621 + dev = ep->dev; 622 + vhub = ep->vhub; 623 + 624 + /* Check device state */ 625 + if (!dev->driver) { 626 + EPDBG(ep, "Bogus device state: driver=%p speed=%d\n", 627 + dev->driver, dev->gadget.speed); 628 + return -ESHUTDOWN; 629 + } 630 + 631 + /* Grab some info from the descriptor */ 632 + ep->epn.is_in = usb_endpoint_dir_in(desc); 633 + ep->ep.maxpacket = maxpacket; 634 + type = 
usb_endpoint_type(desc); 635 + ep->epn.d_next = ep->epn.d_last = 0; 636 + ep->epn.is_iso = false; 637 + ep->epn.stalled = false; 638 + ep->epn.wedged = false; 639 + 640 + EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n", 641 + ep->epn.is_in ? "in" : "out", ep_type_string[type], 642 + usb_endpoint_num(desc), maxpacket); 643 + 644 + /* Can we use DMA descriptor mode ? */ 645 + ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in; 646 + if (ep->epn.desc_mode) 647 + memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT); 648 + 649 + /* 650 + * Large send function can send up to 8 packets from 651 + * one descriptor with a limit of 4095 bytes. 652 + */ 653 + ep->epn.chunk_max = ep->ep.maxpacket; 654 + if (ep->epn.is_in) { 655 + ep->epn.chunk_max <<= 3; 656 + while (ep->epn.chunk_max > 4095) 657 + ep->epn.chunk_max -= ep->ep.maxpacket; 658 + } 659 + 660 + switch(type) { 661 + case USB_ENDPOINT_XFER_CONTROL: 662 + EPDBG(ep, "Only one control endpoint\n"); 663 + return -EINVAL; 664 + case USB_ENDPOINT_XFER_INT: 665 + ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT); 666 + break; 667 + case USB_ENDPOINT_XFER_BULK: 668 + ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK); 669 + break; 670 + case USB_ENDPOINT_XFER_ISOC: 671 + ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO); 672 + ep->epn.is_iso = true; 673 + break; 674 + default: 675 + return -EINVAL; 676 + } 677 + 678 + /* Encode the rest of the EP config register */ 679 + if (maxpacket < 1024) 680 + ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket); 681 + if (!ep->epn.is_in) 682 + ep_conf |= VHUB_EP_CFG_DIR_OUT; 683 + ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc)); 684 + ep_conf |= VHUB_EP_CFG_ENABLE; 685 + ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1); 686 + EPVDBG(ep, "config=%08x\n", ep_conf); 687 + 688 + spin_lock_irqsave(&vhub->lock, flags); 689 + 690 + /* Disable HW and reset DMA */ 691 + writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG); 692 + writel(VHUB_EP_DMA_CTRL_RESET, 693 + ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); 694 + 695 + 
/* Configure and enable */ 696 + writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG); 697 + 698 + if (ep->epn.desc_mode) { 699 + /* Clear DMA status, including the DMA read ptr */ 700 + writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS); 701 + 702 + /* Set descriptor base */ 703 + writel(ep->epn.descs_dma, 704 + ep->epn.regs + AST_VHUB_EP_DESC_BASE); 705 + 706 + /* Set base DMA config value */ 707 + ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE; 708 + if (ep->epn.is_in) 709 + ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE; 710 + 711 + /* First reset and disable all operations */ 712 + writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET, 713 + ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); 714 + 715 + /* Enable descriptor mode */ 716 + writel(ep->epn.dma_conf, 717 + ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); 718 + } else { 719 + /* Set base DMA config value */ 720 + ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE; 721 + 722 + /* Reset and switch to single stage mode */ 723 + writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET, 724 + ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); 725 + writel(ep->epn.dma_conf, 726 + ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); 727 + writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS); 728 + } 729 + 730 + /* Cleanup data toggle just in case */ 731 + writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx), 732 + vhub->regs + AST_VHUB_EP_TOGGLE); 733 + 734 + /* Cleanup and enable ACK interrupt */ 735 + imask = VHUB_EP_IRQ(ep->epn.g_idx); 736 + writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR); 737 + ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER); 738 + ep_ier |= imask; 739 + writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER); 740 + 741 + /* Woot, we are online ! 
*/ 742 + ep->epn.enabled = true; 743 + 744 + spin_unlock_irqrestore(&vhub->lock, flags); 745 + 746 + return 0; 747 + } 748 + 749 + static void ast_vhub_epn_dispose(struct usb_ep *u_ep) 750 + { 751 + struct ast_vhub_ep *ep = to_ast_ep(u_ep); 752 + 753 + if (WARN_ON(!ep->dev || !ep->d_idx)) 754 + return; 755 + 756 + EPDBG(ep, "Releasing endpoint\n"); 757 + 758 + /* Take it out of the EP list */ 759 + list_del_init(&ep->ep.ep_list); 760 + 761 + /* Mark the address free in the device */ 762 + ep->dev->epns[ep->d_idx - 1] = NULL; 763 + 764 + /* Free name & DMA buffers */ 765 + kfree(ep->ep.name); 766 + ep->ep.name = NULL; 767 + dma_free_coherent(&ep->vhub->pdev->dev, 768 + AST_VHUB_EPn_MAX_PACKET + 769 + 8 * AST_VHUB_DESCS_COUNT, 770 + ep->buf, ep->buf_dma); 771 + ep->buf = NULL; 772 + ep->epn.descs = NULL; 773 + 774 + /* Mark free */ 775 + ep->dev = NULL; 776 + } 777 + 778 + static const struct usb_ep_ops ast_vhub_epn_ops = { 779 + .enable = ast_vhub_epn_enable, 780 + .disable = ast_vhub_epn_disable, 781 + .dispose = ast_vhub_epn_dispose, 782 + .queue = ast_vhub_epn_queue, 783 + .dequeue = ast_vhub_epn_dequeue, 784 + .set_halt = ast_vhub_epn_set_halt, 785 + .set_wedge = ast_vhub_epn_set_wedge, 786 + .alloc_request = ast_vhub_alloc_request, 787 + .free_request = ast_vhub_free_request, 788 + }; 789 + 790 + struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr) 791 + { 792 + struct ast_vhub *vhub = d->vhub; 793 + struct ast_vhub_ep *ep; 794 + unsigned long flags; 795 + int i; 796 + 797 + /* Find a free one (no device) */ 798 + spin_lock_irqsave(&vhub->lock, flags); 799 + for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) 800 + if (vhub->epns[i].dev == NULL) 801 + break; 802 + if (i >= AST_VHUB_NUM_GEN_EPs) { 803 + spin_unlock_irqrestore(&vhub->lock, flags); 804 + return NULL; 805 + } 806 + 807 + /* Set it up */ 808 + ep = &vhub->epns[i]; 809 + ep->dev = d; 810 + spin_unlock_irqrestore(&vhub->lock, flags); 811 + 812 + DDBG(d, "Allocating gen EP %d for addr %d\n", 
i, addr); 813 + INIT_LIST_HEAD(&ep->queue); 814 + ep->d_idx = addr; 815 + ep->vhub = vhub; 816 + ep->ep.ops = &ast_vhub_epn_ops; 817 + ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr); 818 + d->epns[addr-1] = ep; 819 + ep->epn.g_idx = i; 820 + ep->epn.regs = vhub->regs + 0x200 + (i * 0x10); 821 + 822 + ep->buf = dma_alloc_coherent(&vhub->pdev->dev, 823 + AST_VHUB_EPn_MAX_PACKET + 824 + 8 * AST_VHUB_DESCS_COUNT, 825 + &ep->buf_dma, GFP_KERNEL); 826 + if (!ep->buf) { 827 + kfree(ep->ep.name); 828 + ep->ep.name = NULL; 829 + return NULL; 830 + } 831 + ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET; 832 + ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET; 833 + 834 + usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET); 835 + list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list); 836 + ep->ep.caps.type_iso = true; 837 + ep->ep.caps.type_bulk = true; 838 + ep->ep.caps.type_int = true; 839 + ep->ep.caps.dir_in = true; 840 + ep->ep.caps.dir_out = true; 841 + 842 + return ep; 843 + }
+829
drivers/usb/gadget/udc/aspeed-vhub/hub.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget 4 + * 5 + * hub.c - virtual hub handling 6 + * 7 + * Copyright 2017 IBM Corporation 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License, or 12 + * (at your option) any later version. 13 + */ 14 + 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/delay.h> 19 + #include <linux/ioport.h> 20 + #include <linux/slab.h> 21 + #include <linux/errno.h> 22 + #include <linux/list.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/proc_fs.h> 25 + #include <linux/prefetch.h> 26 + #include <linux/clk.h> 27 + #include <linux/usb/gadget.h> 28 + #include <linux/of.h> 29 + #include <linux/of_gpio.h> 30 + #include <linux/regmap.h> 31 + #include <linux/dma-mapping.h> 32 + #include <linux/bcd.h> 33 + #include <linux/version.h> 34 + #include <linux/usb.h> 35 + #include <linux/usb/hcd.h> 36 + 37 + #include "vhub.h" 38 + 39 + /* usb 2.0 hub device descriptor 40 + * 41 + * A few things we may want to improve here: 42 + * 43 + * - We may need to indicate TT support 44 + * - We may need a device qualifier descriptor 45 + * as devices can pretend to be usb1 or 2 46 + * - Make vid/did overridable 47 + * - make it look like usb1 if usb1 mode forced 48 + */ 49 + #define KERNEL_REL bin2bcd(((LINUX_VERSION_CODE >> 16) & 0x0ff)) 50 + #define KERNEL_VER bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff)) 51 + 52 + enum { 53 + AST_VHUB_STR_MANUF = 3, 54 + AST_VHUB_STR_PRODUCT = 2, 55 + AST_VHUB_STR_SERIAL = 1, 56 + }; 57 + 58 + static const struct usb_device_descriptor ast_vhub_dev_desc = { 59 + .bLength = USB_DT_DEVICE_SIZE, 60 + .bDescriptorType = USB_DT_DEVICE, 61 + .bcdUSB = cpu_to_le16(0x0200), 62 + .bDeviceClass = USB_CLASS_HUB, 63 + 
.bDeviceSubClass = 0, 64 + .bDeviceProtocol = 1, 65 + .bMaxPacketSize0 = 64, 66 + .idVendor = cpu_to_le16(0x1d6b), 67 + .idProduct = cpu_to_le16(0x0107), 68 + .bcdDevice = cpu_to_le16(0x0100), 69 + .iManufacturer = AST_VHUB_STR_MANUF, 70 + .iProduct = AST_VHUB_STR_PRODUCT, 71 + .iSerialNumber = AST_VHUB_STR_SERIAL, 72 + .bNumConfigurations = 1, 73 + }; 74 + 75 + /* Patches to the above when forcing USB1 mode */ 76 + static void ast_vhub_patch_dev_desc_usb1(struct usb_device_descriptor *desc) 77 + { 78 + desc->bcdUSB = cpu_to_le16(0x0100); 79 + desc->bDeviceProtocol = 0; 80 + } 81 + 82 + /* 83 + * Configuration descriptor: same comments as above 84 + * regarding handling USB1 mode. 85 + */ 86 + 87 + /* 88 + * We don't use sizeof() as Linux definition of 89 + * struct usb_endpoint_descriptor contains 2 90 + * extra bytes 91 + */ 92 + #define AST_VHUB_CONF_DESC_SIZE (USB_DT_CONFIG_SIZE + \ 93 + USB_DT_INTERFACE_SIZE + \ 94 + USB_DT_ENDPOINT_SIZE) 95 + 96 + static const struct ast_vhub_full_cdesc { 97 + struct usb_config_descriptor cfg; 98 + struct usb_interface_descriptor intf; 99 + struct usb_endpoint_descriptor ep; 100 + } __attribute__ ((packed)) ast_vhub_conf_desc = { 101 + .cfg = { 102 + .bLength = USB_DT_CONFIG_SIZE, 103 + .bDescriptorType = USB_DT_CONFIG, 104 + .wTotalLength = cpu_to_le16(AST_VHUB_CONF_DESC_SIZE), 105 + .bNumInterfaces = 1, 106 + .bConfigurationValue = 1, 107 + .iConfiguration = 0, 108 + .bmAttributes = USB_CONFIG_ATT_ONE | 109 + USB_CONFIG_ATT_SELFPOWER | 110 + USB_CONFIG_ATT_WAKEUP, 111 + .bMaxPower = 0, 112 + }, 113 + .intf = { 114 + .bLength = USB_DT_INTERFACE_SIZE, 115 + .bDescriptorType = USB_DT_INTERFACE, 116 + .bInterfaceNumber = 0, 117 + .bAlternateSetting = 0, 118 + .bNumEndpoints = 1, 119 + .bInterfaceClass = USB_CLASS_HUB, 120 + .bInterfaceSubClass = 0, 121 + .bInterfaceProtocol = 0, 122 + .iInterface = 0, 123 + }, 124 + .ep = { 125 + .bLength = USB_DT_ENDPOINT_SIZE, 126 + .bDescriptorType = USB_DT_ENDPOINT, 127 + .bEndpointAddress 
= 0x81, 128 + .bmAttributes = USB_ENDPOINT_XFER_INT, 129 + .wMaxPacketSize = cpu_to_le16(1), 130 + .bInterval = 0x0c, 131 + }, 132 + }; 133 + 134 + #define AST_VHUB_HUB_DESC_SIZE (USB_DT_HUB_NONVAR_SIZE + 2) 135 + 136 + static const struct usb_hub_descriptor ast_vhub_hub_desc = { 137 + .bDescLength = AST_VHUB_HUB_DESC_SIZE, 138 + .bDescriptorType = USB_DT_HUB, 139 + .bNbrPorts = AST_VHUB_NUM_PORTS, 140 + .wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM), 141 + .bPwrOn2PwrGood = 10, 142 + .bHubContrCurrent = 0, 143 + .u.hs.DeviceRemovable[0] = 0, 144 + .u.hs.DeviceRemovable[1] = 0xff, 145 + }; 146 + 147 + /* 148 + * These strings converted to UTF-16 must be smaller than 149 + * our EP0 buffer. 150 + */ 151 + static const struct usb_string ast_vhub_str_array[] = { 152 + { 153 + .id = AST_VHUB_STR_SERIAL, 154 + .s = "00000000" 155 + }, 156 + { 157 + .id = AST_VHUB_STR_PRODUCT, 158 + .s = "USB Virtual Hub" 159 + }, 160 + { 161 + .id = AST_VHUB_STR_MANUF, 162 + .s = "Aspeed" 163 + }, 164 + { } 165 + }; 166 + 167 + static const struct usb_gadget_strings ast_vhub_strings = { 168 + .language = 0x0409, 169 + .strings = (struct usb_string *)ast_vhub_str_array 170 + }; 171 + 172 + static int ast_vhub_hub_dev_status(struct ast_vhub_ep *ep, 173 + u16 wIndex, u16 wValue) 174 + { 175 + u8 st0; 176 + 177 + EPDBG(ep, "GET_STATUS(dev)\n"); 178 + 179 + /* 180 + * Mark it as self-powered, I doubt the BMC is powered off 181 + * the USB bus ... 182 + */ 183 + st0 = 1 << USB_DEVICE_SELF_POWERED; 184 + 185 + /* 186 + * Need to double check how remote wakeup actually works 187 + * on that chip and what triggers it. 
188 + */ 189 + if (ep->vhub->wakeup_en) 190 + st0 |= 1 << USB_DEVICE_REMOTE_WAKEUP; 191 + 192 + return ast_vhub_simple_reply(ep, st0, 0); 193 + } 194 + 195 + static int ast_vhub_hub_ep_status(struct ast_vhub_ep *ep, 196 + u16 wIndex, u16 wValue) 197 + { 198 + int ep_num; 199 + u8 st0 = 0; 200 + 201 + ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK; 202 + EPDBG(ep, "GET_STATUS(ep%d)\n", ep_num); 203 + 204 + /* On the hub we have only EP 0 and 1 */ 205 + if (ep_num == 1) { 206 + if (ep->vhub->ep1_stalled) 207 + st0 |= 1 << USB_ENDPOINT_HALT; 208 + } else if (ep_num != 0) 209 + return std_req_stall; 210 + 211 + return ast_vhub_simple_reply(ep, st0, 0); 212 + } 213 + 214 + static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep, 215 + u16 wIndex, u16 wValue, 216 + bool is_set) 217 + { 218 + EPDBG(ep, "%s_FEATURE(dev val=%02x)\n", 219 + is_set ? "SET" : "CLEAR", wValue); 220 + 221 + if (wValue != USB_DEVICE_REMOTE_WAKEUP) 222 + return std_req_stall; 223 + 224 + ep->vhub->wakeup_en = is_set; 225 + EPDBG(ep, "Hub remote wakeup %s\n", 226 + is_set ? "enabled" : "disabled"); 227 + 228 + return std_req_complete; 229 + } 230 + 231 + static int ast_vhub_hub_ep_feature(struct ast_vhub_ep *ep, 232 + u16 wIndex, u16 wValue, 233 + bool is_set) 234 + { 235 + int ep_num; 236 + u32 reg; 237 + 238 + ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK; 239 + EPDBG(ep, "%s_FEATURE(ep%d val=%02x)\n", 240 + is_set ? "SET" : "CLEAR", ep_num, wValue); 241 + 242 + if (ep_num > 1) 243 + return std_req_stall; 244 + if (wValue != USB_ENDPOINT_HALT) 245 + return std_req_stall; 246 + if (ep_num == 0) 247 + return std_req_complete; 248 + 249 + EPDBG(ep, "%s stall on EP 1\n", 250 + is_set ? 
"setting" : "clearing"); 251 + 252 + ep->vhub->ep1_stalled = is_set; 253 + reg = readl(ep->vhub->regs + AST_VHUB_EP1_CTRL); 254 + if (is_set) { 255 + reg |= VHUB_EP1_CTRL_STALL; 256 + } else { 257 + reg &= ~VHUB_EP1_CTRL_STALL; 258 + reg |= VHUB_EP1_CTRL_RESET_TOGGLE; 259 + } 260 + writel(reg, ep->vhub->regs + AST_VHUB_EP1_CTRL); 261 + 262 + return std_req_complete; 263 + } 264 + 265 + static int ast_vhub_rep_desc(struct ast_vhub_ep *ep, 266 + u8 desc_type, u16 len) 267 + { 268 + size_t dsize; 269 + 270 + EPDBG(ep, "GET_DESCRIPTOR(type:%d)\n", desc_type); 271 + 272 + /* 273 + * Copy first to EP buffer and send from there, so 274 + * we can do some in-place patching if needed. We know 275 + * the EP buffer is big enough but ensure that doesn't 276 + * change. We do that now rather than later after we 277 + * have checked sizes etc... to avoid a gcc bug where 278 + * it thinks len is constant and barfs about read 279 + * overflows in memcpy. 280 + */ 281 + switch(desc_type) { 282 + case USB_DT_DEVICE: 283 + dsize = USB_DT_DEVICE_SIZE; 284 + memcpy(ep->buf, &ast_vhub_dev_desc, dsize); 285 + BUILD_BUG_ON(dsize > sizeof(ast_vhub_dev_desc)); 286 + BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET); 287 + break; 288 + case USB_DT_CONFIG: 289 + dsize = AST_VHUB_CONF_DESC_SIZE; 290 + memcpy(ep->buf, &ast_vhub_conf_desc, dsize); 291 + BUILD_BUG_ON(dsize > sizeof(ast_vhub_conf_desc)); 292 + BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET); 293 + break; 294 + case USB_DT_HUB: 295 + dsize = AST_VHUB_HUB_DESC_SIZE; 296 + memcpy(ep->buf, &ast_vhub_hub_desc, dsize); 297 + BUILD_BUG_ON(dsize > sizeof(ast_vhub_hub_desc)); 298 + BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET); 299 + break; 300 + default: 301 + return std_req_stall; 302 + } 303 + 304 + /* Crop requested length */ 305 + if (len > dsize) 306 + len = dsize; 307 + 308 + /* Patch it if forcing USB1 */ 309 + if (desc_type == USB_DT_DEVICE && ep->vhub->force_usb1) 310 + 
ast_vhub_patch_dev_desc_usb1(ep->buf); 311 + 312 + /* Shoot it from the EP buffer */ 313 + return ast_vhub_reply(ep, NULL, len); 314 + } 315 + 316 + static int ast_vhub_rep_string(struct ast_vhub_ep *ep, 317 + u8 string_id, u16 lang_id, 318 + u16 len) 319 + { 320 + int rc = usb_gadget_get_string (&ast_vhub_strings, string_id, ep->buf); 321 + 322 + /* 323 + * This should never happen unless we put too big strings in 324 + * the array above 325 + */ 326 + BUG_ON(rc >= AST_VHUB_EP0_MAX_PACKET); 327 + 328 + if (rc < 0) 329 + return std_req_stall; 330 + 331 + /* Shoot it from the EP buffer */ 332 + return ast_vhub_reply(ep, NULL, min_t(u16, rc, len)); 333 + } 334 + 335 + enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep, 336 + struct usb_ctrlrequest *crq) 337 + { 338 + struct ast_vhub *vhub = ep->vhub; 339 + u16 wValue, wIndex, wLength; 340 + 341 + wValue = le16_to_cpu(crq->wValue); 342 + wIndex = le16_to_cpu(crq->wIndex); 343 + wLength = le16_to_cpu(crq->wLength); 344 + 345 + /* First packet, grab speed */ 346 + if (vhub->speed == USB_SPEED_UNKNOWN) { 347 + u32 ustat = readl(vhub->regs + AST_VHUB_USBSTS); 348 + if (ustat & VHUB_USBSTS_HISPEED) 349 + vhub->speed = USB_SPEED_HIGH; 350 + else 351 + vhub->speed = USB_SPEED_FULL; 352 + UDCDBG(vhub, "USB status=%08x speed=%s\n", ustat, 353 + vhub->speed == USB_SPEED_HIGH ? 
"high" : "full"); 354 + } 355 + 356 + switch ((crq->bRequestType << 8) | crq->bRequest) { 357 + /* SET_ADDRESS */ 358 + case DeviceOutRequest | USB_REQ_SET_ADDRESS: 359 + EPDBG(ep, "SET_ADDRESS: Got address %x\n", wValue); 360 + writel(wValue, vhub->regs + AST_VHUB_CONF); 361 + return std_req_complete; 362 + 363 + /* GET_STATUS */ 364 + case DeviceRequest | USB_REQ_GET_STATUS: 365 + return ast_vhub_hub_dev_status(ep, wIndex, wValue); 366 + case InterfaceRequest | USB_REQ_GET_STATUS: 367 + return ast_vhub_simple_reply(ep, 0, 0); 368 + case EndpointRequest | USB_REQ_GET_STATUS: 369 + return ast_vhub_hub_ep_status(ep, wIndex, wValue); 370 + 371 + /* SET/CLEAR_FEATURE */ 372 + case DeviceOutRequest | USB_REQ_SET_FEATURE: 373 + return ast_vhub_hub_dev_feature(ep, wIndex, wValue, true); 374 + case DeviceOutRequest | USB_REQ_CLEAR_FEATURE: 375 + return ast_vhub_hub_dev_feature(ep, wIndex, wValue, false); 376 + case EndpointOutRequest | USB_REQ_SET_FEATURE: 377 + return ast_vhub_hub_ep_feature(ep, wIndex, wValue, true); 378 + case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: 379 + return ast_vhub_hub_ep_feature(ep, wIndex, wValue, false); 380 + 381 + /* GET/SET_CONFIGURATION */ 382 + case DeviceRequest | USB_REQ_GET_CONFIGURATION: 383 + return ast_vhub_simple_reply(ep, 1); 384 + case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: 385 + if (wValue != 1) 386 + return std_req_stall; 387 + return std_req_complete; 388 + 389 + /* GET_DESCRIPTOR */ 390 + case DeviceRequest | USB_REQ_GET_DESCRIPTOR: 391 + switch (wValue >> 8) { 392 + case USB_DT_DEVICE: 393 + case USB_DT_CONFIG: 394 + return ast_vhub_rep_desc(ep, wValue >> 8, 395 + wLength); 396 + case USB_DT_STRING: 397 + return ast_vhub_rep_string(ep, wValue & 0xff, 398 + wIndex, wLength); 399 + } 400 + return std_req_stall; 401 + 402 + /* GET/SET_INTERFACE */ 403 + case DeviceRequest | USB_REQ_GET_INTERFACE: 404 + return ast_vhub_simple_reply(ep, 0); 405 + case DeviceOutRequest | USB_REQ_SET_INTERFACE: 406 + if (wValue != 0 || 
wIndex != 0) 407 + return std_req_stall; 408 + return std_req_complete; 409 + } 410 + return std_req_stall; 411 + } 412 + 413 + static void ast_vhub_update_hub_ep1(struct ast_vhub *vhub, 414 + unsigned int port) 415 + { 416 + /* Update HW EP1 response */ 417 + u32 reg = readl(vhub->regs + AST_VHUB_EP1_STS_CHG); 418 + u32 pmask = (1 << (port + 1)); 419 + if (vhub->ports[port].change) 420 + reg |= pmask; 421 + else 422 + reg &= ~pmask; 423 + writel(reg, vhub->regs + AST_VHUB_EP1_STS_CHG); 424 + } 425 + 426 + static void ast_vhub_change_port_stat(struct ast_vhub *vhub, 427 + unsigned int port, 428 + u16 clr_flags, 429 + u16 set_flags, 430 + bool set_c) 431 + { 432 + struct ast_vhub_port *p = &vhub->ports[port]; 433 + u16 prev; 434 + 435 + /* Update port status */ 436 + prev = p->status; 437 + p->status = (prev & ~clr_flags) | set_flags; 438 + DDBG(&p->dev, "port %d status %04x -> %04x (C=%d)\n", 439 + port + 1, prev, p->status, set_c); 440 + 441 + /* Update change bits if needed */ 442 + if (set_c) { 443 + u16 chg = p->status ^ prev; 444 + 445 + /* Only these are relevant for change */ 446 + chg &= USB_PORT_STAT_C_CONNECTION | 447 + USB_PORT_STAT_C_ENABLE | 448 + USB_PORT_STAT_C_SUSPEND | 449 + USB_PORT_STAT_C_OVERCURRENT | 450 + USB_PORT_STAT_C_RESET | 451 + USB_PORT_STAT_C_L1; 452 + p->change |= chg; 453 + 454 + ast_vhub_update_hub_ep1(vhub, port); 455 + } 456 + } 457 + 458 + static void ast_vhub_send_host_wakeup(struct ast_vhub *vhub) 459 + { 460 + u32 reg = readl(vhub->regs + AST_VHUB_CTRL); 461 + UDCDBG(vhub, "Waking up host !\n"); 462 + reg |= VHUB_CTRL_MANUAL_REMOTE_WAKEUP; 463 + writel(reg, vhub->regs + AST_VHUB_CTRL); 464 + } 465 + 466 + void ast_vhub_device_connect(struct ast_vhub *vhub, 467 + unsigned int port, bool on) 468 + { 469 + if (on) 470 + ast_vhub_change_port_stat(vhub, port, 0, 471 + USB_PORT_STAT_CONNECTION, true); 472 + else 473 + ast_vhub_change_port_stat(vhub, port, 474 + USB_PORT_STAT_CONNECTION | 475 + USB_PORT_STAT_ENABLE, 476 + 0, true); 
477 + 478 + /* 479 + * If the hub is set to wakup the host on connection events 480 + * then send a wakeup. 481 + */ 482 + if (vhub->wakeup_en) 483 + ast_vhub_send_host_wakeup(vhub); 484 + } 485 + 486 + static void ast_vhub_wake_work(struct work_struct *work) 487 + { 488 + struct ast_vhub *vhub = container_of(work, 489 + struct ast_vhub, 490 + wake_work); 491 + unsigned long flags; 492 + unsigned int i; 493 + 494 + /* 495 + * Wake all sleeping ports. If a port is suspended by 496 + * the host suspend (without explicit state suspend), 497 + * we let the normal host wake path deal with it later. 498 + */ 499 + spin_lock_irqsave(&vhub->lock, flags); 500 + for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { 501 + struct ast_vhub_port *p = &vhub->ports[i]; 502 + 503 + if (!(p->status & USB_PORT_STAT_SUSPEND)) 504 + continue; 505 + ast_vhub_change_port_stat(vhub, i, 506 + USB_PORT_STAT_SUSPEND, 507 + 0, true); 508 + ast_vhub_dev_resume(&p->dev); 509 + } 510 + ast_vhub_send_host_wakeup(vhub); 511 + spin_unlock_irqrestore(&vhub->lock, flags); 512 + } 513 + 514 + void ast_vhub_hub_wake_all(struct ast_vhub *vhub) 515 + { 516 + /* 517 + * A device is trying to wake the world, because this 518 + * can recurse into the device, we break the call chain 519 + * using a work queue 520 + */ 521 + schedule_work(&vhub->wake_work); 522 + } 523 + 524 + static void ast_vhub_port_reset(struct ast_vhub *vhub, u8 port) 525 + { 526 + struct ast_vhub_port *p = &vhub->ports[port]; 527 + u16 set, clr, speed; 528 + 529 + /* First mark disabled */ 530 + ast_vhub_change_port_stat(vhub, port, 531 + USB_PORT_STAT_ENABLE | 532 + USB_PORT_STAT_SUSPEND, 533 + USB_PORT_STAT_RESET, 534 + false); 535 + 536 + if (!p->dev.driver) 537 + return; 538 + 539 + /* 540 + * This will either "start" the port or reset the 541 + * device if already started... 
542 + */ 543 + ast_vhub_dev_reset(&p->dev); 544 + 545 + /* Grab the right speed */ 546 + speed = p->dev.driver->max_speed; 547 + if (speed == USB_SPEED_UNKNOWN || speed > vhub->speed) 548 + speed = vhub->speed; 549 + 550 + switch (speed) { 551 + case USB_SPEED_LOW: 552 + set = USB_PORT_STAT_LOW_SPEED; 553 + clr = USB_PORT_STAT_HIGH_SPEED; 554 + break; 555 + case USB_SPEED_FULL: 556 + set = 0; 557 + clr = USB_PORT_STAT_LOW_SPEED | 558 + USB_PORT_STAT_HIGH_SPEED; 559 + break; 560 + case USB_SPEED_HIGH: 561 + set = USB_PORT_STAT_HIGH_SPEED; 562 + clr = USB_PORT_STAT_LOW_SPEED; 563 + break; 564 + default: 565 + UDCDBG(vhub, "Unsupported speed %d when" 566 + " connecting device\n", 567 + speed); 568 + return; 569 + } 570 + clr |= USB_PORT_STAT_RESET; 571 + set |= USB_PORT_STAT_ENABLE; 572 + 573 + /* This should ideally be delayed ... */ 574 + ast_vhub_change_port_stat(vhub, port, clr, set, true); 575 + } 576 + 577 + static enum std_req_rc ast_vhub_set_port_feature(struct ast_vhub_ep *ep, 578 + u8 port, u16 feat) 579 + { 580 + struct ast_vhub *vhub = ep->vhub; 581 + struct ast_vhub_port *p; 582 + 583 + if (port == 0 || port > AST_VHUB_NUM_PORTS) 584 + return std_req_stall; 585 + port--; 586 + p = &vhub->ports[port]; 587 + 588 + switch(feat) { 589 + case USB_PORT_FEAT_SUSPEND: 590 + if (!(p->status & USB_PORT_STAT_ENABLE)) 591 + return std_req_complete; 592 + ast_vhub_change_port_stat(vhub, port, 593 + 0, USB_PORT_STAT_SUSPEND, 594 + false); 595 + ast_vhub_dev_suspend(&p->dev); 596 + return std_req_complete; 597 + case USB_PORT_FEAT_RESET: 598 + EPDBG(ep, "Port reset !\n"); 599 + ast_vhub_port_reset(vhub, port); 600 + return std_req_complete; 601 + case USB_PORT_FEAT_POWER: 602 + /* 603 + * On Power-on, we mark the connected flag changed, 604 + * if there's a connected device, some hosts will 605 + * otherwise fail to detect it. 
606 + */ 607 + if (p->status & USB_PORT_STAT_CONNECTION) { 608 + p->change |= USB_PORT_STAT_C_CONNECTION; 609 + ast_vhub_update_hub_ep1(vhub, port); 610 + } 611 + return std_req_complete; 612 + case USB_PORT_FEAT_TEST: 613 + case USB_PORT_FEAT_INDICATOR: 614 + /* We don't do anything with these */ 615 + return std_req_complete; 616 + } 617 + return std_req_stall; 618 + } 619 + 620 + static enum std_req_rc ast_vhub_clr_port_feature(struct ast_vhub_ep *ep, 621 + u8 port, u16 feat) 622 + { 623 + struct ast_vhub *vhub = ep->vhub; 624 + struct ast_vhub_port *p; 625 + 626 + if (port == 0 || port > AST_VHUB_NUM_PORTS) 627 + return std_req_stall; 628 + port--; 629 + p = &vhub->ports[port]; 630 + 631 + switch(feat) { 632 + case USB_PORT_FEAT_ENABLE: 633 + ast_vhub_change_port_stat(vhub, port, 634 + USB_PORT_STAT_ENABLE | 635 + USB_PORT_STAT_SUSPEND, 0, 636 + false); 637 + ast_vhub_dev_suspend(&p->dev); 638 + return std_req_complete; 639 + case USB_PORT_FEAT_SUSPEND: 640 + if (!(p->status & USB_PORT_STAT_SUSPEND)) 641 + return std_req_complete; 642 + ast_vhub_change_port_stat(vhub, port, 643 + USB_PORT_STAT_SUSPEND, 0, 644 + false); 645 + ast_vhub_dev_resume(&p->dev); 646 + return std_req_complete; 647 + case USB_PORT_FEAT_POWER: 648 + /* We don't do power control */ 649 + return std_req_complete; 650 + case USB_PORT_FEAT_INDICATOR: 651 + /* We don't have indicators */ 652 + return std_req_complete; 653 + case USB_PORT_FEAT_C_CONNECTION: 654 + case USB_PORT_FEAT_C_ENABLE: 655 + case USB_PORT_FEAT_C_SUSPEND: 656 + case USB_PORT_FEAT_C_OVER_CURRENT: 657 + case USB_PORT_FEAT_C_RESET: 658 + /* Clear state-change feature */ 659 + p->change &= ~(1u << (feat - 16)); 660 + ast_vhub_update_hub_ep1(vhub, port); 661 + return std_req_complete; 662 + } 663 + return std_req_stall; 664 + } 665 + 666 + static enum std_req_rc ast_vhub_get_port_stat(struct ast_vhub_ep *ep, 667 + u8 port) 668 + { 669 + struct ast_vhub *vhub = ep->vhub; 670 + u16 stat, chg; 671 + 672 + if (port == 0 || port > 
AST_VHUB_NUM_PORTS) 673 + return std_req_stall; 674 + port--; 675 + 676 + stat = vhub->ports[port].status; 677 + chg = vhub->ports[port].change; 678 + 679 + /* We always have power */ 680 + stat |= USB_PORT_STAT_POWER; 681 + 682 + EPDBG(ep, " port status=%04x change=%04x\n", stat, chg); 683 + 684 + return ast_vhub_simple_reply(ep, 685 + stat & 0xff, 686 + stat >> 8, 687 + chg & 0xff, 688 + chg >> 8); 689 + } 690 + 691 + enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep, 692 + struct usb_ctrlrequest *crq) 693 + { 694 + u16 wValue, wIndex, wLength; 695 + 696 + wValue = le16_to_cpu(crq->wValue); 697 + wIndex = le16_to_cpu(crq->wIndex); 698 + wLength = le16_to_cpu(crq->wLength); 699 + 700 + switch ((crq->bRequestType << 8) | crq->bRequest) { 701 + case GetHubStatus: 702 + EPDBG(ep, "GetHubStatus\n"); 703 + return ast_vhub_simple_reply(ep, 0, 0, 0, 0); 704 + case GetPortStatus: 705 + EPDBG(ep, "GetPortStatus(%d)\n", wIndex & 0xff); 706 + return ast_vhub_get_port_stat(ep, wIndex & 0xf); 707 + case GetHubDescriptor: 708 + if (wValue != (USB_DT_HUB << 8)) 709 + return std_req_stall; 710 + EPDBG(ep, "GetHubDescriptor(%d)\n", wIndex & 0xff); 711 + return ast_vhub_rep_desc(ep, USB_DT_HUB, wLength); 712 + case SetHubFeature: 713 + case ClearHubFeature: 714 + EPDBG(ep, "Get/SetHubFeature(%d)\n", wValue); 715 + /* No feature, just complete the requests */ 716 + if (wValue == C_HUB_LOCAL_POWER || 717 + wValue == C_HUB_OVER_CURRENT) 718 + return std_req_complete; 719 + return std_req_stall; 720 + case SetPortFeature: 721 + EPDBG(ep, "SetPortFeature(%d,%d)\n", wIndex & 0xf, wValue); 722 + return ast_vhub_set_port_feature(ep, wIndex & 0xf, wValue); 723 + case ClearPortFeature: 724 + EPDBG(ep, "ClearPortFeature(%d,%d)\n", wIndex & 0xf, wValue); 725 + return ast_vhub_clr_port_feature(ep, wIndex & 0xf, wValue); 726 + default: 727 + EPDBG(ep, "Unknown class request\n"); 728 + } 729 + return std_req_stall; 730 + } 731 + 732 + void ast_vhub_hub_suspend(struct ast_vhub 
*vhub) 733 + { 734 + unsigned int i; 735 + 736 + UDCDBG(vhub, "USB bus suspend\n"); 737 + 738 + if (vhub->suspended) 739 + return; 740 + 741 + vhub->suspended = true; 742 + 743 + /* 744 + * Forward to unsuspended ports without changing 745 + * their connection status. 746 + */ 747 + for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { 748 + struct ast_vhub_port *p = &vhub->ports[i]; 749 + 750 + if (!(p->status & USB_PORT_STAT_SUSPEND)) 751 + ast_vhub_dev_suspend(&p->dev); 752 + } 753 + } 754 + 755 + void ast_vhub_hub_resume(struct ast_vhub *vhub) 756 + { 757 + unsigned int i; 758 + 759 + UDCDBG(vhub, "USB bus resume\n"); 760 + 761 + if (!vhub->suspended) 762 + return; 763 + 764 + vhub->suspended = false; 765 + 766 + /* 767 + * Forward to unsuspended ports without changing 768 + * their connection status. 769 + */ 770 + for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { 771 + struct ast_vhub_port *p = &vhub->ports[i]; 772 + 773 + if (!(p->status & USB_PORT_STAT_SUSPEND)) 774 + ast_vhub_dev_resume(&p->dev); 775 + } 776 + } 777 + 778 + void ast_vhub_hub_reset(struct ast_vhub *vhub) 779 + { 780 + unsigned int i; 781 + 782 + UDCDBG(vhub, "USB bus reset\n"); 783 + 784 + /* 785 + * Is the speed known ? If not we don't care, we aren't 786 + * initialized yet and ports haven't been enabled. 787 + */ 788 + if (vhub->speed == USB_SPEED_UNKNOWN) 789 + return; 790 + 791 + /* We aren't suspended anymore obviously */ 792 + vhub->suspended = false; 793 + 794 + /* No speed set */ 795 + vhub->speed = USB_SPEED_UNKNOWN; 796 + 797 + /* Wakeup not enabled anymore */ 798 + vhub->wakeup_en = false; 799 + 800 + /* 801 + * Clear all port status, disable gadgets and "suspend" 802 + * them. They will be woken up by a port reset. 
803 + */ 804 + for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { 805 + struct ast_vhub_port *p = &vhub->ports[i]; 806 + 807 + /* Only keep the connected flag */ 808 + p->status &= USB_PORT_STAT_CONNECTION; 809 + p->change = 0; 810 + 811 + /* Suspend the gadget if any */ 812 + ast_vhub_dev_suspend(&p->dev); 813 + } 814 + 815 + /* Cleanup HW */ 816 + writel(0, vhub->regs + AST_VHUB_CONF); 817 + writel(0, vhub->regs + AST_VHUB_EP0_CTRL); 818 + writel(VHUB_EP1_CTRL_RESET_TOGGLE | 819 + VHUB_EP1_CTRL_ENABLE, 820 + vhub->regs + AST_VHUB_EP1_CTRL); 821 + writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG); 822 + } 823 + 824 + void ast_vhub_init_hub(struct ast_vhub *vhub) 825 + { 826 + vhub->speed = USB_SPEED_UNKNOWN; 827 + INIT_WORK(&vhub->wake_work, ast_vhub_wake_work); 828 + } 829 +
+514
drivers/usb/gadget/udc/aspeed-vhub/vhub.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + #ifndef __ASPEED_VHUB_H 3 + #define __ASPEED_VHUB_H 4 + 5 + /***************************** 6 + * * 7 + * VHUB register definitions * 8 + * * 9 + *****************************/ 10 + 11 + #define AST_VHUB_CTRL 0x00 /* Root Function Control & Status Register */ 12 + #define AST_VHUB_CONF 0x04 /* Root Configuration Setting Register */ 13 + #define AST_VHUB_IER 0x08 /* Interrupt Ctrl Register */ 14 + #define AST_VHUB_ISR 0x0C /* Interrupt Status Register */ 15 + #define AST_VHUB_EP_ACK_IER 0x10 /* Programmable Endpoint Pool ACK Interrupt Enable Register */ 16 + #define AST_VHUB_EP_NACK_IER 0x14 /* Programmable Endpoint Pool NACK Interrupt Enable Register */ 17 + #define AST_VHUB_EP_ACK_ISR 0x18 /* Programmable Endpoint Pool ACK Interrupt Status Register */ 18 + #define AST_VHUB_EP_NACK_ISR 0x1C /* Programmable Endpoint Pool NACK Interrupt Status Register */ 19 + #define AST_VHUB_SW_RESET 0x20 /* Device Controller Soft Reset Enable Register */ 20 + #define AST_VHUB_USBSTS 0x24 /* USB Status Register */ 21 + #define AST_VHUB_EP_TOGGLE 0x28 /* Programmable Endpoint Pool Data Toggle Value Set */ 22 + #define AST_VHUB_ISO_FAIL_ACC 0x2C /* Isochronous Transaction Fail Accumulator */ 23 + #define AST_VHUB_EP0_CTRL 0x30 /* Endpoint 0 Contrl/Status Register */ 24 + #define AST_VHUB_EP0_DATA 0x34 /* Base Address of Endpoint 0 In/OUT Data Buffer Register */ 25 + #define AST_VHUB_EP1_CTRL 0x38 /* Endpoint 1 Contrl/Status Register */ 26 + #define AST_VHUB_EP1_STS_CHG 0x3C /* Endpoint 1 Status Change Bitmap Data */ 27 + #define AST_VHUB_SETUP0 0x80 /* Root Device Setup Data Buffer0 */ 28 + #define AST_VHUB_SETUP1 0x84 /* Root Device Setup Data Buffer1 */ 29 + 30 + /* Main control reg */ 31 + #define VHUB_CTRL_PHY_CLK (1 << 31) 32 + #define VHUB_CTRL_PHY_LOOP_TEST (1 << 25) 33 + #define VHUB_CTRL_DN_PWN (1 << 24) 34 + #define VHUB_CTRL_DP_PWN (1 << 23) 35 + #define VHUB_CTRL_LONG_DESC (1 << 18) 36 + #define VHUB_CTRL_ISO_RSP_CTRL (1 
<< 17) 37 + #define VHUB_CTRL_SPLIT_IN (1 << 16) 38 + #define VHUB_CTRL_LOOP_T_RESULT (1 << 15) 39 + #define VHUB_CTRL_LOOP_T_STS (1 << 14) 40 + #define VHUB_CTRL_PHY_BIST_RESULT (1 << 13) 41 + #define VHUB_CTRL_PHY_BIST_CTRL (1 << 12) 42 + #define VHUB_CTRL_PHY_RESET_DIS (1 << 11) 43 + #define VHUB_CTRL_SET_TEST_MODE(x) ((x) << 8) 44 + #define VHUB_CTRL_MANUAL_REMOTE_WAKEUP (1 << 4) 45 + #define VHUB_CTRL_AUTO_REMOTE_WAKEUP (1 << 3) 46 + #define VHUB_CTRL_CLK_STOP_SUSPEND (1 << 2) 47 + #define VHUB_CTRL_FULL_SPEED_ONLY (1 << 1) 48 + #define VHUB_CTRL_UPSTREAM_CONNECT (1 << 0) 49 + 50 + /* IER & ISR */ 51 + #define VHUB_IRQ_USB_CMD_DEADLOCK (1 << 18) 52 + #define VHUB_IRQ_EP_POOL_NAK (1 << 17) 53 + #define VHUB_IRQ_EP_POOL_ACK_STALL (1 << 16) 54 + #define VHUB_IRQ_DEVICE5 (1 << 13) 55 + #define VHUB_IRQ_DEVICE4 (1 << 12) 56 + #define VHUB_IRQ_DEVICE3 (1 << 11) 57 + #define VHUB_IRQ_DEVICE2 (1 << 10) 58 + #define VHUB_IRQ_DEVICE1 (1 << 9) 59 + #define VHUB_IRQ_BUS_RESUME (1 << 8) 60 + #define VHUB_IRQ_BUS_SUSPEND (1 << 7) 61 + #define VHUB_IRQ_BUS_RESET (1 << 6) 62 + #define VHUB_IRQ_HUB_EP1_IN_DATA_ACK (1 << 5) 63 + #define VHUB_IRQ_HUB_EP0_IN_DATA_NAK (1 << 4) 64 + #define VHUB_IRQ_HUB_EP0_IN_ACK_STALL (1 << 3) 65 + #define VHUB_IRQ_HUB_EP0_OUT_NAK (1 << 2) 66 + #define VHUB_IRQ_HUB_EP0_OUT_ACK_STALL (1 << 1) 67 + #define VHUB_IRQ_HUB_EP0_SETUP (1 << 0) 68 + #define VHUB_IRQ_ACK_ALL 0x1ff 69 + 70 + /* SW reset reg */ 71 + #define VHUB_SW_RESET_EP_POOL (1 << 9) 72 + #define VHUB_SW_RESET_DMA_CONTROLLER (1 << 8) 73 + #define VHUB_SW_RESET_DEVICE5 (1 << 5) 74 + #define VHUB_SW_RESET_DEVICE4 (1 << 4) 75 + #define VHUB_SW_RESET_DEVICE3 (1 << 3) 76 + #define VHUB_SW_RESET_DEVICE2 (1 << 2) 77 + #define VHUB_SW_RESET_DEVICE1 (1 << 1) 78 + #define VHUB_SW_RESET_ROOT_HUB (1 << 0) 79 + #define VHUB_SW_RESET_ALL (VHUB_SW_RESET_EP_POOL | \ 80 + VHUB_SW_RESET_DMA_CONTROLLER | \ 81 + VHUB_SW_RESET_DEVICE5 | \ 82 + VHUB_SW_RESET_DEVICE4 | \ 83 + VHUB_SW_RESET_DEVICE3 | \ 84 + 
VHUB_SW_RESET_DEVICE2 | \ 85 + VHUB_SW_RESET_DEVICE1 | \ 86 + VHUB_SW_RESET_ROOT_HUB) 87 + /* EP ACK/NACK IRQ masks */ 88 + #define VHUB_EP_IRQ(n) (1 << (n)) 89 + #define VHUB_EP_IRQ_ALL 0x7fff /* 15 EPs */ 90 + 91 + /* USB status reg */ 92 + #define VHUB_USBSTS_HISPEED (1 << 27) 93 + 94 + /* EP toggle */ 95 + #define VHUB_EP_TOGGLE_VALUE (1 << 8) 96 + #define VHUB_EP_TOGGLE_SET_EPNUM(x) ((x) & 0x1f) 97 + 98 + /* HUB EP0 control */ 99 + #define VHUB_EP0_CTRL_STALL (1 << 0) 100 + #define VHUB_EP0_TX_BUFF_RDY (1 << 1) 101 + #define VHUB_EP0_RX_BUFF_RDY (1 << 2) 102 + #define VHUB_EP0_RX_LEN(x) (((x) >> 16) & 0x7f) 103 + #define VHUB_EP0_SET_TX_LEN(x) (((x) & 0x7f) << 8) 104 + 105 + /* HUB EP1 control */ 106 + #define VHUB_EP1_CTRL_RESET_TOGGLE (1 << 2) 107 + #define VHUB_EP1_CTRL_STALL (1 << 1) 108 + #define VHUB_EP1_CTRL_ENABLE (1 << 0) 109 + 110 + /*********************************** 111 + * * 112 + * per-device register definitions * 113 + * * 114 + ***********************************/ 115 + #define AST_VHUB_DEV_EN_CTRL 0x00 116 + #define AST_VHUB_DEV_ISR 0x04 117 + #define AST_VHUB_DEV_EP0_CTRL 0x08 118 + #define AST_VHUB_DEV_EP0_DATA 0x0c 119 + 120 + /* Device enable control */ 121 + #define VHUB_DEV_EN_SET_ADDR(x) ((x) << 8) 122 + #define VHUB_DEV_EN_ADDR_MASK ((0xff) << 8) 123 + #define VHUB_DEV_EN_EP0_NAK_IRQEN (1 << 6) 124 + #define VHUB_DEV_EN_EP0_IN_ACK_IRQEN (1 << 5) 125 + #define VHUB_DEV_EN_EP0_OUT_NAK_IRQEN (1 << 4) 126 + #define VHUB_DEV_EN_EP0_OUT_ACK_IRQEN (1 << 3) 127 + #define VHUB_DEV_EN_EP0_SETUP_IRQEN (1 << 2) 128 + #define VHUB_DEV_EN_SPEED_SEL_HIGH (1 << 1) 129 + #define VHUB_DEV_EN_ENABLE_PORT (1 << 0) 130 + 131 + /* Interrupt status */ 132 + #define VHUV_DEV_IRQ_EP0_IN_DATA_NACK (1 << 4) 133 + #define VHUV_DEV_IRQ_EP0_IN_ACK_STALL (1 << 3) 134 + #define VHUV_DEV_IRQ_EP0_OUT_DATA_NACK (1 << 2) 135 + #define VHUV_DEV_IRQ_EP0_OUT_ACK_STALL (1 << 1) 136 + #define VHUV_DEV_IRQ_EP0_SETUP (1 << 0) 137 + 138 + /* Control bits. 
139 + * 140 + * Note: The driver relies on the bulk of those bits 141 + * matching corresponding vHub EP0 control bits 142 + */ 143 + #define VHUB_DEV_EP0_CTRL_STALL VHUB_EP0_CTRL_STALL 144 + #define VHUB_DEV_EP0_TX_BUFF_RDY VHUB_EP0_TX_BUFF_RDY 145 + #define VHUB_DEV_EP0_RX_BUFF_RDY VHUB_EP0_RX_BUFF_RDY 146 + #define VHUB_DEV_EP0_RX_LEN(x) VHUB_EP0_RX_LEN(x) 147 + #define VHUB_DEV_EP0_SET_TX_LEN(x) VHUB_EP0_SET_TX_LEN(x) 148 + 149 + /************************************* 150 + * * 151 + * per-endpoint register definitions * 152 + * * 153 + *************************************/ 154 + 155 + #define AST_VHUB_EP_CONFIG 0x00 156 + #define AST_VHUB_EP_DMA_CTLSTAT 0x04 157 + #define AST_VHUB_EP_DESC_BASE 0x08 158 + #define AST_VHUB_EP_DESC_STATUS 0x0C 159 + 160 + /* EP config reg */ 161 + #define VHUB_EP_CFG_SET_MAX_PKT(x) (((x) & 0x3ff) << 16) 162 + #define VHUB_EP_CFG_AUTO_DATA_DISABLE (1 << 13) 163 + #define VHUB_EP_CFG_STALL_CTRL (1 << 12) 164 + #define VHUB_EP_CFG_SET_EP_NUM(x) (((x) & 0xf) << 8) 165 + #define VHUB_EP_CFG_SET_TYPE(x) ((x) << 5) 166 + #define EP_TYPE_OFF 0 167 + #define EP_TYPE_BULK 1 168 + #define EP_TYPE_INT 2 169 + #define EP_TYPE_ISO 3 170 + #define VHUB_EP_CFG_DIR_OUT (1 << 4) 171 + #define VHUB_EP_CFG_SET_DEV(x) ((x) << 1) 172 + #define VHUB_EP_CFG_ENABLE (1 << 0) 173 + 174 + /* EP DMA control */ 175 + #define VHUB_EP_DMA_PROC_STATUS(x) (((x) >> 4) & 0xf) 176 + #define EP_DMA_PROC_RX_IDLE 0 177 + #define EP_DMA_PROC_TX_IDLE 8 178 + #define VHUB_EP_DMA_IN_LONG_MODE (1 << 3) 179 + #define VHUB_EP_DMA_OUT_CONTIG_MODE (1 << 3) 180 + #define VHUB_EP_DMA_CTRL_RESET (1 << 2) 181 + #define VHUB_EP_DMA_SINGLE_STAGE (1 << 1) 182 + #define VHUB_EP_DMA_DESC_MODE (1 << 0) 183 + 184 + /* EP DMA status */ 185 + #define VHUB_EP_DMA_SET_TX_SIZE(x) ((x) << 16) 186 + #define VHUB_EP_DMA_TX_SIZE(x) (((x) >> 16) & 0x7ff) 187 + #define VHUB_EP_DMA_RPTR(x) (((x) >> 8) & 0xff) 188 + #define VHUB_EP_DMA_SET_RPTR(x) (((x) & 0xff) << 8) 189 + #define 
VHUB_EP_DMA_SET_CPU_WPTR(x) (x) 190 + #define VHUB_EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */ 191 + 192 + /******************************* 193 + * * 194 + * DMA descriptors definitions * 195 + * * 196 + *******************************/ 197 + 198 + /* Desc W1 IN */ 199 + #define VHUB_DSC1_IN_INTERRUPT (1 << 31) 200 + #define VHUB_DSC1_IN_SPID_DATA0 (0 << 14) 201 + #define VHUB_DSC1_IN_SPID_DATA2 (1 << 14) 202 + #define VHUB_DSC1_IN_SPID_DATA1 (2 << 14) 203 + #define VHUB_DSC1_IN_SPID_MDATA (3 << 14) 204 + #define VHUB_DSC1_IN_SET_LEN(x) ((x) & 0xfff) 205 + #define VHUB_DSC1_IN_LEN(x) ((x) & 0xfff) 206 + 207 + /**************************************** 208 + * * 209 + * Data structures and misc definitions * 210 + * * 211 + ****************************************/ 212 + 213 + #define AST_VHUB_NUM_GEN_EPs 15 /* Generic non-0 EPs */ 214 + #define AST_VHUB_NUM_PORTS 5 /* vHub ports */ 215 + #define AST_VHUB_EP0_MAX_PACKET 64 /* EP0's max packet size */ 216 + #define AST_VHUB_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */ 217 + #define AST_VHUB_DESCS_COUNT 256 /* Use 256 descriptor mode (valid 218 + * values are 256 and 32) 219 + */ 220 + 221 + struct ast_vhub; 222 + struct ast_vhub_dev; 223 + 224 + /* 225 + * DMA descriptor (generic EPs only, currently only used 226 + * for IN endpoints 227 + */ 228 + struct ast_vhub_desc { 229 + __le32 w0; 230 + __le32 w1; 231 + }; 232 + 233 + /* A transfer request, either core-originated or internal */ 234 + struct ast_vhub_req { 235 + struct usb_request req; 236 + struct list_head queue; 237 + 238 + /* Actual count written to descriptors (desc mode only) */ 239 + unsigned int act_count; 240 + 241 + /* 242 + * Desc number of the final packet or -1. 
For non-desc 243 + * mode (or ep0), any >= 0 value means "last packet" 244 + */ 245 + int last_desc; 246 + 247 + /* Request active (pending DMAs) */ 248 + bool active : 1; 249 + 250 + /* Internal request (don't call back core) */ 251 + bool internal : 1; 252 + }; 253 + #define to_ast_req(__ureq) container_of(__ureq, struct ast_vhub_req, req) 254 + 255 + /* Current state of an EP0 */ 256 + enum ep0_state { 257 + ep0_state_token, 258 + ep0_state_data, 259 + ep0_state_status, 260 + }; 261 + 262 + /* 263 + * An endpoint, either generic, ep0, actual gadget EP 264 + * or internal use vhub EP0. vhub EP1 doesn't have an 265 + * associated structure as it's mostly HW managed. 266 + */ 267 + struct ast_vhub_ep { 268 + struct usb_ep ep; 269 + 270 + /* Request queue */ 271 + struct list_head queue; 272 + 273 + /* EP index in the device, 0 means this is an EP0 */ 274 + unsigned int d_idx; 275 + 276 + /* Dev pointer or NULL for vHub EP0 */ 277 + struct ast_vhub_dev *dev; 278 + 279 + /* vHub itself */ 280 + struct ast_vhub *vhub; 281 + 282 + /* 283 + * DMA buffer for EP0, fallback DMA buffer for misaligned 284 + * OUT transfers for generic EPs 285 + */ 286 + void *buf; 287 + dma_addr_t buf_dma; 288 + 289 + /* The rest depends on the EP type */ 290 + union { 291 + /* EP0 (either device or vhub) */ 292 + struct { 293 + /* 294 + * EP0 registers are "similar" for 295 + * vHub and devices but located in 296 + * different places. 
297 + */ 298 + void __iomem *ctlstat; 299 + void __iomem *setup; 300 + 301 + /* Current state & direction */ 302 + enum ep0_state state; 303 + bool dir_in; 304 + 305 + /* Internal use request */ 306 + struct ast_vhub_req req; 307 + } ep0; 308 + 309 + /* Generic endpoint (aka EPn) */ 310 + struct { 311 + /* Registers */ 312 + void __iomem *regs; 313 + 314 + /* Index in global pool (0..14) */ 315 + unsigned int g_idx; 316 + 317 + /* DMA Descriptors */ 318 + struct ast_vhub_desc *descs; 319 + dma_addr_t descs_dma; 320 + unsigned int d_next; 321 + unsigned int d_last; 322 + unsigned int dma_conf; 323 + 324 + /* Max chunk size for IN EPs */ 325 + unsigned int chunk_max; 326 + 327 + /* State flags */ 328 + bool is_in : 1; 329 + bool is_iso : 1; 330 + bool stalled : 1; 331 + bool wedged : 1; 332 + bool enabled : 1; 333 + bool desc_mode : 1; 334 + } epn; 335 + }; 336 + }; 337 + #define to_ast_ep(__uep) container_of(__uep, struct ast_vhub_ep, ep) 338 + 339 + /* A device attached to a vHub port */ 340 + struct ast_vhub_dev { 341 + struct ast_vhub *vhub; 342 + void __iomem *regs; 343 + 344 + /* Device index (0...4) and name string */ 345 + unsigned int index; 346 + const char *name; 347 + 348 + /* sysfs enclosure for the gadget gunk */ 349 + struct device *port_dev; 350 + 351 + /* Link to gadget core */ 352 + struct usb_gadget gadget; 353 + struct usb_gadget_driver *driver; 354 + bool registered : 1; 355 + bool wakeup_en : 1; 356 + bool suspended : 1; 357 + bool enabled : 1; 358 + 359 + /* Endpoint structures */ 360 + struct ast_vhub_ep ep0; 361 + struct ast_vhub_ep *epns[AST_VHUB_NUM_GEN_EPs]; 362 + 363 + }; 364 + #define to_ast_dev(__g) container_of(__g, struct ast_vhub_dev, gadget) 365 + 366 + /* Per vhub port stateinfo structure */ 367 + struct ast_vhub_port { 368 + /* Port status & status change registers */ 369 + u16 status; 370 + u16 change; 371 + 372 + /* Associated device slot */ 373 + struct ast_vhub_dev dev; 374 + }; 375 + 376 + /* Global vhub structure */ 377 + 
struct ast_vhub { 378 + struct platform_device *pdev; 379 + void __iomem *regs; 380 + int irq; 381 + spinlock_t lock; 382 + struct work_struct wake_work; 383 + struct clk *clk; 384 + 385 + /* EP0 DMA buffers allocated in one chunk */ 386 + void *ep0_bufs; 387 + dma_addr_t ep0_bufs_dma; 388 + 389 + /* EP0 of the vhub itself */ 390 + struct ast_vhub_ep ep0; 391 + 392 + /* State of vhub ep1 */ 393 + bool ep1_stalled : 1; 394 + 395 + /* Per-port info */ 396 + struct ast_vhub_port ports[AST_VHUB_NUM_PORTS]; 397 + 398 + /* Generic EP data structures */ 399 + struct ast_vhub_ep epns[AST_VHUB_NUM_GEN_EPs]; 400 + 401 + /* Upstream bus is suspended ? */ 402 + bool suspended : 1; 403 + 404 + /* Hub itself can signal remote wakeup */ 405 + bool wakeup_en : 1; 406 + 407 + /* Force full speed only */ 408 + bool force_usb1 : 1; 409 + 410 + /* Upstream bus speed captured at bus reset */ 411 + unsigned int speed; 412 + }; 413 + 414 + /* Standard request handlers result codes */ 415 + enum std_req_rc { 416 + std_req_stall = -1, /* Stall requested */ 417 + std_req_complete = 0, /* Request completed with no data */ 418 + std_req_data = 1, /* Request completed with data */ 419 + std_req_driver = 2, /* Pass to driver pls */ 420 + }; 421 + 422 + #ifdef CONFIG_USB_GADGET_VERBOSE 423 + #define UDCVDBG(u, fmt...) dev_dbg(&(u)->pdev->dev, fmt) 424 + 425 + #define EPVDBG(ep, fmt, ...) do { \ 426 + dev_dbg(&(ep)->vhub->pdev->dev, \ 427 + "%s:EP%d " fmt, \ 428 + (ep)->dev ? (ep)->dev->name : "hub", \ 429 + (ep)->d_idx, ##__VA_ARGS__); \ 430 + } while(0) 431 + 432 + #define DVDBG(d, fmt, ...) do { \ 433 + dev_dbg(&(d)->vhub->pdev->dev, \ 434 + "%s " fmt, (d)->name, \ 435 + ##__VA_ARGS__); \ 436 + } while(0) 437 + 438 + #else 439 + #define UDCVDBG(u, fmt...) do { } while(0) 440 + #define EPVDBG(ep, fmt, ...) do { } while(0) 441 + #define DVDBG(d, fmt, ...) do { } while(0) 442 + #endif 443 + 444 + #ifdef CONFIG_USB_GADGET_DEBUG 445 + #define UDCDBG(u, fmt...) 
dev_dbg(&(u)->pdev->dev, fmt) 446 + 447 + #define EPDBG(ep, fmt, ...) do { \ 448 + dev_dbg(&(ep)->vhub->pdev->dev, \ 449 + "%s:EP%d " fmt, \ 450 + (ep)->dev ? (ep)->dev->name : "hub", \ 451 + (ep)->d_idx, ##__VA_ARGS__); \ 452 + } while(0) 453 + 454 + #define DDBG(d, fmt, ...) do { \ 455 + dev_dbg(&(d)->vhub->pdev->dev, \ 456 + "%s " fmt, (d)->name, \ 457 + ##__VA_ARGS__); \ 458 + } while(0) 459 + #else 460 + #define UDCDBG(u, fmt...) do { } while(0) 461 + #define EPDBG(ep, fmt, ...) do { } while(0) 462 + #define DDBG(d, fmt, ...) do { } while(0) 463 + #endif 464 + 465 + /* core.c */ 466 + void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, 467 + int status); 468 + void ast_vhub_nuke(struct ast_vhub_ep *ep, int status); 469 + struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep, 470 + gfp_t gfp_flags); 471 + void ast_vhub_free_request(struct usb_ep *u_ep, struct usb_request *u_req); 472 + void ast_vhub_init_hw(struct ast_vhub *vhub); 473 + 474 + /* ep0.c */ 475 + void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack); 476 + void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep); 477 + void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep, 478 + struct ast_vhub_dev *dev); 479 + int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len); 480 + int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...); 481 + #define ast_vhub_simple_reply(udc, ...) 
\ 482 + __ast_vhub_simple_reply((udc), \ 483 + sizeof((u8[]) { __VA_ARGS__ })/sizeof(u8), \ 484 + __VA_ARGS__) 485 + 486 + /* hub.c */ 487 + void ast_vhub_init_hub(struct ast_vhub *vhub); 488 + enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep, 489 + struct usb_ctrlrequest *crq); 490 + enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep, 491 + struct usb_ctrlrequest *crq); 492 + void ast_vhub_device_connect(struct ast_vhub *vhub, unsigned int port, 493 + bool on); 494 + void ast_vhub_hub_suspend(struct ast_vhub *vhub); 495 + void ast_vhub_hub_resume(struct ast_vhub *vhub); 496 + void ast_vhub_hub_reset(struct ast_vhub *vhub); 497 + void ast_vhub_hub_wake_all(struct ast_vhub *vhub); 498 + 499 + /* dev.c */ 500 + int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx); 501 + void ast_vhub_del_dev(struct ast_vhub_dev *d); 502 + void ast_vhub_dev_irq(struct ast_vhub_dev *d); 503 + int ast_vhub_std_dev_request(struct ast_vhub_ep *ep, 504 + struct usb_ctrlrequest *crq); 505 + 506 + /* epn.c */ 507 + void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep); 508 + void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep); 509 + struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr); 510 + void ast_vhub_dev_suspend(struct ast_vhub_dev *d); 511 + void ast_vhub_dev_resume(struct ast_vhub_dev *d); 512 + void ast_vhub_dev_reset(struct ast_vhub_dev *d); 513 + 514 + #endif /* __ASPEED_VHUB_H */
+10 -12
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 20 20 #include <linux/ctype.h> 21 21 #include <linux/usb/ch9.h> 22 22 #include <linux/usb/gadget.h> 23 - #include <linux/usb/atmel_usba_udc.h> 24 23 #include <linux/delay.h> 25 24 #include <linux/of.h> 26 25 #include <linux/irq.h> ··· 416 417 static int vbus_is_present(struct usba_udc *udc) 417 418 { 418 419 if (udc->vbus_pin) 419 - return gpiod_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted; 420 + return gpiod_get_value(udc->vbus_pin); 420 421 421 422 /* No Vbus detection: Assume always present */ 422 423 return 1; ··· 2075 2076 2076 2077 udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus", 2077 2078 GPIOD_IN); 2078 - udc->vbus_pin_inverted = gpiod_is_active_low(udc->vbus_pin); 2079 2079 2080 2080 if (fifo_mode == 0) { 2081 2081 pp = NULL; ··· 2277 2279 if (udc->vbus_pin) { 2278 2280 irq_set_status_flags(gpiod_to_irq(udc->vbus_pin), IRQ_NOAUTOEN); 2279 2281 ret = devm_request_threaded_irq(&pdev->dev, 2280 - gpiod_to_irq(udc->vbus_pin), NULL, 2281 - usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS, 2282 - "atmel_usba_udc", udc); 2283 - if (ret) { 2284 - udc->vbus_pin = NULL; 2285 - dev_warn(&udc->pdev->dev, 2286 - "failed to request vbus irq; " 2287 - "assuming always on\n"); 2288 - } 2282 + gpiod_to_irq(udc->vbus_pin), NULL, 2283 + usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS, 2284 + "atmel_usba_udc", udc); 2285 + if (ret) { 2286 + udc->vbus_pin = NULL; 2287 + dev_warn(&udc->pdev->dev, 2288 + "failed to request vbus irq; " 2289 + "assuming always on\n"); 2290 + } 2289 2291 } 2290 2292 2291 2293 ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
-1
drivers/usb/gadget/udc/atmel_usba_udc.h
··· 326 326 const struct usba_udc_errata *errata; 327 327 int irq; 328 328 struct gpio_desc *vbus_pin; 329 - int vbus_pin_inverted; 330 329 int num_ep; 331 330 int configured_ep; 332 331 struct usba_fifo_cfg *fifo_cfg;
+6
drivers/usb/gadget/udc/core.c
··· 244 244 * Returns zero, or a negative error code. Endpoints that are not enabled 245 245 * report errors; errors will also be 246 246 * reported when the usb peripheral is disconnected. 247 + * 248 + * If and only if @req is successfully queued (the return value is zero), 249 + * @req->complete() will be called exactly once, when the Gadget core and 250 + * UDC are finished with the request. When the completion function is called, 251 + * control of the request is returned to the device driver which submitted it. 252 + * The completion handler may then immediately free or reuse @req. 247 253 */ 248 254 int usb_ep_queue(struct usb_ep *ep, 249 255 struct usb_request *req, gfp_t gfp_flags)
+1
drivers/usb/gadget/udc/fsl_udc_core.c
··· 253 253 portctrl |= PORTSCX_PTW_16BIT; 254 254 /* fall through */ 255 255 case FSL_USB2_PHY_UTMI: 256 + case FSL_USB2_PHY_UTMI_DUAL: 256 257 if (udc->pdata->have_sysif_regs) { 257 258 if (udc->pdata->controller_ver) { 258 259 /* controller version 1.6 or above */
+25 -12
drivers/usb/gadget/udc/renesas_usb3.c
··· 333 333 struct extcon_dev *extcon; 334 334 struct work_struct extcon_work; 335 335 struct phy *phy; 336 + struct dentry *dentry; 336 337 337 338 struct renesas_usb3_ep *usb3_ep; 338 339 int num_usb3_eps; ··· 623 622 usb3_usb2_pullup(usb3, 0); 624 623 usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON); 625 624 usb3_reset_epc(usb3); 625 + usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP | 626 + USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE | 627 + USB_INT_1_SPEED | USB_INT_1_B3_WRMRST | 628 + USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND | 629 + USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST); 630 + usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON); 631 + usb3_init_epc_registers(usb3); 626 632 627 633 if (usb3->driver) 628 634 usb3->driver->disconnect(&usb3->gadget); ··· 2401 2393 2402 2394 file = debugfs_create_file("b_device", 0644, root, usb3, 2403 2395 &renesas_usb3_b_device_fops); 2404 - if (!file) 2396 + if (!file) { 2405 2397 dev_info(dev, "%s: Can't create debugfs mode\n", __func__); 2398 + debugfs_remove_recursive(root); 2399 + } else { 2400 + usb3->dentry = root; 2401 + } 2406 2402 } 2407 2403 2408 2404 /*------- platform_driver ------------------------------------------------*/ ··· 2414 2402 { 2415 2403 struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); 2416 2404 2405 + debugfs_remove_recursive(usb3->dentry); 2417 2406 device_remove_file(&pdev->dev, &dev_attr_role); 2418 2407 2419 2408 usb_del_gadget_udc(&usb3->gadget); 2420 2409 renesas_usb3_dma_free_prd(usb3, &pdev->dev); 2421 2410 2422 2411 __renesas_usb3_ep_free_request(usb3->ep0_req); 2423 - if (usb3->phy) 2424 - phy_put(usb3->phy); 2425 2412 pm_runtime_disable(&pdev->dev); 2426 2413 2427 2414 return 0; ··· 2639 2628 if (ret < 0) 2640 2629 goto err_alloc_prd; 2641 2630 2631 + /* 2632 + * This is optional. So, if this driver cannot get a phy, 2633 + * this driver will not handle a phy anymore. 
2634 + */ 2635 + usb3->phy = devm_phy_optional_get(&pdev->dev, "usb"); 2636 + if (IS_ERR(usb3->phy)) { 2637 + ret = PTR_ERR(usb3->phy); 2638 + goto err_add_udc; 2639 + } 2640 + 2641 + pm_runtime_enable(&pdev->dev); 2642 2642 ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget); 2643 2643 if (ret < 0) 2644 2644 goto err_add_udc; ··· 2658 2636 if (ret < 0) 2659 2637 goto err_dev_create; 2660 2638 2661 - /* 2662 - * This is an optional. So, if this driver cannot get a phy, 2663 - * this driver will not handle a phy anymore. 2664 - */ 2665 - usb3->phy = devm_phy_get(&pdev->dev, "usb"); 2666 - if (IS_ERR(usb3->phy)) 2667 - usb3->phy = NULL; 2668 - 2669 2639 usb3->workaround_for_vbus = priv->workaround_for_vbus; 2670 2640 2671 2641 renesas_usb3_debugfs_init(usb3, &pdev->dev); 2672 2642 2673 2643 dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : ""); 2674 - pm_runtime_enable(usb3_to_dev(usb3)); 2675 2644 2676 2645 return 0; 2677 2646
+1 -1
drivers/usb/gadget/usbstring.c
··· 33 33 * characters (which are also widely used in C strings). 34 34 */ 35 35 int 36 - usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf) 36 + usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf) 37 37 { 38 38 struct usb_string *s; 39 39 int len;
+2 -1
drivers/usb/mtu3/Kconfig
··· 2 2 3 3 config USB_MTU3 4 4 tristate "MediaTek USB3 Dual Role controller" 5 - depends on EXTCON && (USB || USB_GADGET) 5 + depends on USB || USB_GADGET 6 6 depends on ARCH_MEDIATEK || COMPILE_TEST 7 7 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD 8 8 help ··· 40 40 config USB_MTU3_DUAL_ROLE 41 41 bool "Dual Role mode" 42 42 depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3)) 43 + depends on (EXTCON=y || EXTCON=USB_MTU3) 43 44 help 44 45 This is the default mode of working of MTU3 controller where 45 46 both host and gadget features are enabled.
-4
drivers/usb/mtu3/mtu3.h
··· 197 197 * @edev: external connector used to detect vbus and iddig changes 198 198 * @vbus_nb: notifier for vbus detection 199 199 * @vbus_nb: notifier for iddig(idpin) detection 200 - * @extcon_reg_dwork: delay work for extcon notifier register, waiting for 201 - * xHCI driver initialization, it's necessary for system bootup 202 - * as device. 203 200 * @is_u3_drd: whether port0 supports usb3.0 dual-role device or not 204 201 * @manual_drd_enabled: it's true when supports dual-role device by debugfs 205 202 * to switch host/device modes depending on user input. ··· 206 209 struct extcon_dev *edev; 207 210 struct notifier_block vbus_nb; 208 211 struct notifier_block id_nb; 209 - struct delayed_work extcon_reg_dwork; 210 212 bool is_u3_drd; 211 213 bool manual_drd_enabled; 212 214 };
+3 -22
drivers/usb/mtu3/mtu3_dr.c
··· 238 238 return 0; 239 239 } 240 240 241 - static void extcon_register_dwork(struct work_struct *work) 242 - { 243 - struct delayed_work *dwork = to_delayed_work(work); 244 - struct otg_switch_mtk *otg_sx = 245 - container_of(dwork, struct otg_switch_mtk, extcon_reg_dwork); 246 - 247 - ssusb_extcon_register(otg_sx); 248 - } 249 - 250 241 /* 251 242 * We provide an interface via debugfs to switch between host and device modes 252 243 * depending on user input. ··· 398 407 { 399 408 struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; 400 409 401 - if (otg_sx->manual_drd_enabled) { 410 + if (otg_sx->manual_drd_enabled) 402 411 ssusb_debugfs_init(ssusb); 403 - } else { 404 - INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork, 405 - extcon_register_dwork); 406 - 407 - /* 408 - * It is enough to delay 1s for waiting for 409 - * host initialization 410 - */ 411 - schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ); 412 - } 412 + else 413 + ssusb_extcon_register(otg_sx); 413 414 414 415 return 0; 415 416 } ··· 412 429 413 430 if (otg_sx->manual_drd_enabled) 414 431 ssusb_debugfs_exit(ssusb); 415 - else 416 - cancel_delayed_work(&otg_sx->extcon_reg_dwork); 417 432 }
+2 -6
drivers/usb/mtu3/mtu3_gadget.c
··· 660 660 mtu3_gadget_init_eps(mtu); 661 661 662 662 ret = usb_add_gadget_udc(mtu->dev, &mtu->g); 663 - if (ret) { 663 + if (ret) 664 664 dev_err(mtu->dev, "failed to register udc\n"); 665 - return ret; 666 - } 667 665 668 - usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); 669 - 670 - return 0; 666 + return ret; 671 667 } 672 668 673 669 void mtu3_gadget_cleanup(struct mtu3 *mtu)
+11 -1
drivers/usb/mtu3/mtu3_gadget_ep0.c
··· 7 7 * Author: Chunfeng.Yun <chunfeng.yun@mediatek.com> 8 8 */ 9 9 10 + #include <linux/iopoll.h> 10 11 #include <linux/usb/composite.h> 11 12 12 13 #include "mtu3.h" ··· 264 263 { 265 264 void __iomem *mbase = mtu->mac_base; 266 265 int handled = 1; 266 + u32 value; 267 267 268 268 switch (le16_to_cpu(setup->wIndex) >> 8) { 269 269 case TEST_J: ··· 293 291 /* no TX completion interrupt, and need restart platform after test */ 294 292 if (mtu->test_mode_nr == TEST_PACKET_MODE) 295 293 ep0_load_test_packet(mtu); 294 + 295 + /* send status before entering test mode. */ 296 + value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS; 297 + mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND); 298 + 299 + /* wait for ACK status sent by host */ 300 + readl_poll_timeout(mbase + U3D_EP0CSR, value, 301 + !(value & EP0_DATAEND), 100, 5000); 296 302 297 303 mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr); 298 304 ··· 556 546 struct usb_request *req; 557 547 u32 csr; 558 548 u8 *src; 559 - u8 count; 549 + u32 count; 560 550 u32 maxp; 561 551 562 552 dev_dbg(mtu->dev, "%s\n", __func__);
-24
include/linux/usb/atmel_usba_udc.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Platform data definitions for Atmel USBA gadget driver. 4 - */ 5 - #ifndef __LINUX_USB_USBA_H 6 - #define __LINUX_USB_USBA_H 7 - 8 - struct usba_ep_data { 9 - char *name; 10 - int index; 11 - int fifo_size; 12 - int nr_banks; 13 - int can_dma; 14 - int can_isoc; 15 - }; 16 - 17 - struct usba_platform_data { 18 - int vbus_pin; 19 - int vbus_pin_inverted; 20 - int num_ep; 21 - struct usba_ep_data ep[0]; 22 - }; 23 - 24 - #endif /* __LINUX_USB_USBA_H */
+1 -1
include/linux/usb/gadget.h
··· 763 763 }; 764 764 765 765 /* put descriptor for string with that id into buf (buflen >= 256) */ 766 - int usb_gadget_get_string(struct usb_gadget_strings *table, int id, u8 *buf); 766 + int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf); 767 767 768 768 /*-------------------------------------------------------------------------*/ 769 769