Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

usb: Add MediaTek USB3 DRD driver

This patch adds support for the MediaTek USB3 controller
integrated into MT8173. It currently supports High-Speed
Peripheral Only mode.

Super-Speed Peripheral, Dual-Role Device and Host Only (xHCI)
modes will be added in the next patchs.

Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Chunfeng Yun and committed by
Greg Kroah-Hartman
df2069ac 065d48cf

+3860
+2
drivers/usb/Kconfig
··· 95 95 96 96 endif 97 97 98 + source "drivers/usb/mtu3/Kconfig" 99 + 98 100 source "drivers/usb/musb/Kconfig" 99 101 100 102 source "drivers/usb/dwc3/Kconfig"
+1
drivers/usb/Makefile
··· 12 12 obj-$(CONFIG_USB_ISP1760) += isp1760/ 13 13 14 14 obj-$(CONFIG_USB_MON) += mon/ 15 + obj-$(CONFIG_USB_MTU3) += mtu3/ 15 16 16 17 obj-$(CONFIG_PCI) += host/ 17 18 obj-$(CONFIG_USB_EHCI_HCD) += host/
+32
drivers/usb/mtu3/Kconfig
··· 1 + # For MTK USB3.0 IP 2 + 3 + config USB_MTU3 4 + tristate "MediaTek USB3 Dual Role controller" 5 + depends on (USB || USB_GADGET) && HAS_DMA 6 + depends on ARCH_MEDIATEK || COMPILE_TEST 7 + help 8 + Say Y or M here if your system runs on MediaTek SoCs with 9 + Dual Role SuperSpeed USB controller. You can select usb 10 + mode as peripheral role or host role, or both. 11 + 12 + If you don't know what this is, please say N. 13 + 14 + Choose M here to compile this driver as a module, and it 15 + will be called mtu3.ko. 16 + 17 + 18 + if USB_MTU3 19 + choice 20 + bool "MTU3 Mode Selection" 21 + default USB_MTU3_GADGET if (!USB && USB_GADGET) 22 + 23 + config USB_MTU3_GADGET 24 + bool "Gadget only mode" 25 + depends on USB_GADGET=y || USB_GADGET=USB_MTU3 26 + help 27 + Select this when you want to use MTU3 in gadget mode only, 28 + thereby the host feature will be regressed. 29 + 30 + endchoice 31 + 32 + endif
+2
drivers/usb/mtu3/Makefile
··· 1 + obj-$(CONFIG_USB_MTU3) += mtu3.o 2 + mtu3-y := mtu3_plat.o mtu3_core.o mtu3_gadget_ep0.o mtu3_gadget.o mtu3_qmu.o
+341
drivers/usb/mtu3/mtu3.h
··· 1 + /* 2 + * mtu3.h - MediaTek USB3 DRD header 3 + * 4 + * Copyright (C) 2016 MediaTek Inc. 5 + * 6 + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 7 + * 8 + * This software is licensed under the terms of the GNU General Public 9 + * License version 2, as published by the Free Software Foundation, and 10 + * may be copied, distributed, and modified under those terms. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + */ 18 + 19 + #ifndef __MTU3_H__ 20 + #define __MTU3_H__ 21 + 22 + #include <linux/device.h> 23 + #include <linux/dmapool.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/list.h> 26 + #include <linux/phy/phy.h> 27 + #include <linux/regulator/consumer.h> 28 + #include <linux/usb.h> 29 + #include <linux/usb/ch9.h> 30 + #include <linux/usb/gadget.h> 31 + #include <linux/usb/otg.h> 32 + 33 + struct mtu3; 34 + struct mtu3_ep; 35 + struct mtu3_request; 36 + 37 + #include "mtu3_hw_regs.h" 38 + #include "mtu3_qmu.h" 39 + 40 + #define MU3D_EP_TXCR0(epnum) (U3D_TX1CSR0 + (((epnum) - 1) * 0x10)) 41 + #define MU3D_EP_TXCR1(epnum) (U3D_TX1CSR1 + (((epnum) - 1) * 0x10)) 42 + #define MU3D_EP_TXCR2(epnum) (U3D_TX1CSR2 + (((epnum) - 1) * 0x10)) 43 + 44 + #define MU3D_EP_RXCR0(epnum) (U3D_RX1CSR0 + (((epnum) - 1) * 0x10)) 45 + #define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10)) 46 + #define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10)) 47 + 48 + #define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10)) 49 + #define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10)) 50 + #define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10)) 51 + 52 + #define USB_QMU_TQCSR(epnum) (U3D_TXQCSR1 + (((epnum) - 1) * 0x10)) 53 + #define USB_QMU_TQSAR(epnum) (U3D_TXQSAR1 + (((epnum) - 1) * 0x10)) 54 + #define USB_QMU_TQCPR(epnum) (U3D_TXQCPR1 + (((epnum) - 1) * 0x10)) 55 + 56 + #define SSUSB_U2_CTRL(p) (U3D_SSUSB_U2_CTRL_0P + ((p) * 0x08)) 57 + 58 + #define MTU3_DRIVER_NAME "mtu3" 59 + #define DMA_ADDR_INVALID (~(dma_addr_t)0) 60 + 61 + #define MTU3_EP_ENABLED BIT(0) 62 + #define MTU3_EP_STALL BIT(1) 63 + #define MTU3_EP_WEDGE BIT(2) 64 + #define MTU3_EP_BUSY BIT(3) 65 + 66 + #define MTU3_U2_IP_SLOT_DEFAULT 1 67 + 68 + /** 69 + * Normally the device works on HS or SS, to simplify fifo management, 70 + * devide fifo into some 512B parts, use bitmap to manage it; And 71 + * 128 bits size of bitmap is large enough, that means it can manage 72 + * up to 64KB fifo size. 73 + * NOTE: MTU3_EP_FIFO_UNIT should be power of two 74 + */ 75 + #define MTU3_EP_FIFO_UNIT (1 << 9) 76 + #define MTU3_FIFO_BIT_SIZE 128 77 + #define MTU3_U2_IP_EP0_FIFO_SIZE 64 78 + 79 + /** 80 + * Maximum size of ep0 response buffer for ch9 requests, 81 + * the SET_SEL request uses 6 so far, and GET_STATUS is 2 82 + */ 83 + #define EP0_RESPONSE_BUF 6 84 + 85 + /* device operated link and speed got from DEVICE_CONF register */ 86 + enum mtu3_speed { 87 + MTU3_SPEED_INACTIVE = 0, 88 + MTU3_SPEED_FULL = 1, 89 + MTU3_SPEED_HIGH = 3, 90 + }; 91 + 92 + /** 93 + * @MU3D_EP0_STATE_SETUP: waits for SETUP or received a SETUP 94 + * without data stage. 95 + * @MU3D_EP0_STATE_TX: IN data stage 96 + * @MU3D_EP0_STATE_RX: OUT data stage 97 + * @MU3D_EP0_STATE_TX_END: the last IN data is transferred, and 98 + * waits for its completion interrupt 99 + * @MU3D_EP0_STATE_STALL: ep0 is in stall status, will be auto-cleared 100 + * after receives a SETUP. 101 + */ 102 + enum mtu3_g_ep0_state { 103 + MU3D_EP0_STATE_SETUP = 1, 104 + MU3D_EP0_STATE_TX, 105 + MU3D_EP0_STATE_RX, 106 + MU3D_EP0_STATE_TX_END, 107 + MU3D_EP0_STATE_STALL, 108 + }; 109 + 110 + /** 111 + * @base: the base address of fifo 112 + * @limit: the bitmap size in bits 113 + * @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT 114 + */ 115 + struct mtu3_fifo_info { 116 + u32 base; 117 + u32 limit; 118 + DECLARE_BITMAP(bitmap, MTU3_FIFO_BIT_SIZE); 119 + }; 120 + 121 + /** 122 + * General Purpose Descriptor (GPD): 123 + * The format of TX GPD is a little different from RX one. 124 + * And the size of GPD is 16 bytes. 125 + * 126 + * @flag: 127 + * bit0: Hardware Own (HWO) 128 + * bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported 129 + * bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1 130 + * bit7: Interrupt On Completion (IOC) 131 + * @chksum: This is used to validate the contents of this GPD; 132 + * If TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued 133 + * when checksum validation fails; 134 + * Checksum value is calculated over the 16 bytes of the GPD by default; 135 + * @data_buf_len (RX ONLY): This value indicates the length of 136 + * the assigned data buffer 137 + * @next_gpd: Physical address of the next GPD 138 + * @buffer: Physical address of the data buffer 139 + * @buf_len: 140 + * (TX): This value indicates the length of the assigned data buffer 141 + * (RX): The total length of data received 142 + * @ext_len: reserved 143 + * @ext_flag: 144 + * bit5 (TX ONLY): Zero Length Packet (ZLP), 145 + */ 146 + struct qmu_gpd { 147 + __u8 flag; 148 + __u8 chksum; 149 + __le16 data_buf_len; 150 + __le32 next_gpd; 151 + __le32 buffer; 152 + __le16 buf_len; 153 + __u8 ext_len; 154 + __u8 ext_flag; 155 + } __packed; 156 + 157 + /** 158 + * dma: physical base address of GPD segment 159 + * start: virtual base address of GPD segment 160 + * end: the last GPD element 161 + * enqueue: the first empty GPD to use 162 + * dequeue: the first completed GPD serviced by ISR 163 + * NOTE: the size of GPD ring should be >= 2 164 + */ 165 + struct mtu3_gpd_ring { 166 + dma_addr_t dma; 167 + struct qmu_gpd *start; 168 + struct qmu_gpd *end; 169 + struct qmu_gpd *enqueue; 170 + struct qmu_gpd *dequeue; 171 + }; 172 + 173 + /** 174 + * @fifo_size: it is (@slot + 1) * @fifo_seg_size 175 + * @fifo_seg_size: it is roundup_pow_of_two(@maxp) 176 + */ 177 + struct mtu3_ep { 178 + struct usb_ep ep; 179 + char name[12]; 180 + struct mtu3 *mtu; 181 + u8 epnum; 182 + u8 type; 183 + u8 is_in; 184 + u16 maxp; 185 + int slot; 186 + u32 fifo_size; 187 + u32 fifo_addr; 188 + u32 fifo_seg_size; 189 + struct mtu3_fifo_info *fifo; 190 + 191 + struct list_head req_list; 192 + struct mtu3_gpd_ring gpd_ring; 193 + const struct usb_endpoint_descriptor *desc; 194 + 195 + int flags; 196 + u8 wedged; 197 + u8 busy; 198 + }; 199 + 200 + struct mtu3_request { 201 + struct usb_request request; 202 + struct list_head list; 203 + struct mtu3_ep *mep; 204 + struct mtu3 *mtu; 205 + struct qmu_gpd *gpd; 206 + int epnum; 207 + }; 208 + 209 + /** 210 + * struct mtu3 - device driver instance data. 211 + * @slot: MTU3_U2_IP_SLOT_DEFAULT for U2 IP 212 + * @may_wakeup: means device's remote wakeup is enabled 213 + * @is_self_powered: is reported in device status and the config descriptor 214 + * @ep0_req: dummy request used while handling standard USB requests 215 + * for GET_STATUS and SET_SEL 216 + * @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests 217 + */ 218 + struct mtu3 { 219 + spinlock_t lock; 220 + struct device *dev; 221 + void __iomem *mac_base; 222 + void __iomem *ippc_base; 223 + struct phy *phy; 224 + struct regulator *vusb33; 225 + struct clk *sys_clk; 226 + int irq; 227 + 228 + struct mtu3_fifo_info tx_fifo; 229 + struct mtu3_fifo_info rx_fifo; 230 + 231 + struct mtu3_ep *ep_array; 232 + struct mtu3_ep *in_eps; 233 + struct mtu3_ep *out_eps; 234 + struct mtu3_ep *ep0; 235 + int num_eps; 236 + int slot; 237 + int active_ep; 238 + 239 + struct dma_pool *qmu_gpd_pool; 240 + enum mtu3_g_ep0_state ep0_state; 241 + struct usb_gadget g; /* the gadget */ 242 + struct usb_gadget_driver *gadget_driver; 243 + struct mtu3_request ep0_req; 244 + u8 setup_buf[EP0_RESPONSE_BUF]; 245 + 246 + unsigned is_active:1; 247 + unsigned may_wakeup:1; 248 + unsigned is_self_powered:1; 249 + unsigned test_mode:1; 250 + unsigned softconnect:1; 251 + 252 + u8 address; 253 + u8 test_mode_nr; 254 + u32 hw_version; 255 + }; 256 + 257 + static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g) 258 + { 259 + return container_of(g, struct mtu3, g); 260 + } 261 + 262 + static inline int is_first_entry(const struct list_head *list, 263 + const struct list_head *head) 264 + { 265 + return list_is_last(head, list); 266 + } 267 + 268 + static inline struct mtu3_request *to_mtu3_request(struct usb_request *req) 269 + { 270 + return req ? container_of(req, struct mtu3_request, request) : NULL; 271 + } 272 + 273 + static inline struct mtu3_ep *to_mtu3_ep(struct usb_ep *ep) 274 + { 275 + return ep ? container_of(ep, struct mtu3_ep, ep) : NULL; 276 + } 277 + 278 + static inline struct mtu3_request *next_request(struct mtu3_ep *mep) 279 + { 280 + struct list_head *queue = &mep->req_list; 281 + 282 + if (list_empty(queue)) 283 + return NULL; 284 + 285 + return list_first_entry(queue, struct mtu3_request, list); 286 + } 287 + 288 + static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data) 289 + { 290 + writel(data, base + offset); 291 + } 292 + 293 + static inline u32 mtu3_readl(void __iomem *base, u32 offset) 294 + { 295 + return readl(base + offset); 296 + } 297 + 298 + static inline void mtu3_setbits(void __iomem *base, u32 offset, u32 bits) 299 + { 300 + void __iomem *addr = base + offset; 301 + u32 tmp = readl(addr); 302 + 303 + writel((tmp | (bits)), addr); 304 + } 305 + 306 + static inline void mtu3_clrbits(void __iomem *base, u32 offset, u32 bits) 307 + { 308 + void __iomem *addr = base + offset; 309 + u32 tmp = readl(addr); 310 + 311 + writel((tmp & ~(bits)), addr); 312 + } 313 + 314 + int ssusb_check_clocks(struct mtu3 *mtu, u32 ex_clks); 315 + struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); 316 + void mtu3_free_request(struct usb_ep *ep, struct usb_request *req); 317 + void mtu3_req_complete(struct mtu3_ep *mep, 318 + struct usb_request *req, int status); 319 + 320 + int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep, 321 + int interval, int burst, int mult); 322 + void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep); 323 + void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set); 324 + void mtu3_ep0_setup(struct mtu3 *mtu); 325 + void mtu3_start(struct mtu3 *mtu); 326 + void mtu3_stop(struct mtu3 *mtu); 327 + void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable); 328 + 329 + int mtu3_gadget_setup(struct mtu3 *mtu); 330 + void mtu3_gadget_cleanup(struct mtu3 *mtu); 331 + void mtu3_gadget_reset(struct mtu3 *mtu); 332 + void mtu3_gadget_suspend(struct mtu3 *mtu); 333 + void mtu3_gadget_resume(struct mtu3 *mtu); 334 + void mtu3_gadget_disconnect(struct mtu3 *mtu); 335 + int ssusb_gadget_init(struct mtu3 *mtu); 336 + void ssusb_gadget_exit(struct mtu3 *mtu); 337 + 338 + irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu); 339 + extern const struct usb_ep_ops mtu3_ep0_ops; 340 + 341 + #endif
+675
drivers/usb/mtu3/mtu3_core.c
··· 1 + /* 2 + * mtu3_core.c - hardware access layer and gadget init/exit of 3 + * MediaTek usb3 Dual-Role Controller Driver 4 + * 5 + * Copyright (C) 2016 MediaTek Inc. 6 + * 7 + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 8 + * 9 + * This software is licensed under the terms of the GNU General Public 10 + * License version 2, as published by the Free Software Foundation, and 11 + * may be copied, distributed, and modified under those terms. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + */ 19 + 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/of_address.h> 23 + #include <linux/of_irq.h> 24 + #include <linux/platform_device.h> 25 + 26 + #include "mtu3.h" 27 + 28 + static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size) 29 + { 30 + struct mtu3_fifo_info *fifo = mep->fifo; 31 + u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT); 32 + u32 start_bit; 33 + 34 + /* ensure that @mep->fifo_seg_size is power of two */ 35 + num_bits = roundup_pow_of_two(num_bits); 36 + if (num_bits > fifo->limit) 37 + return -EINVAL; 38 + 39 + mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT; 40 + num_bits = num_bits * (mep->slot + 1); 41 + start_bit = bitmap_find_next_zero_area(fifo->bitmap, 42 + fifo->limit, 0, num_bits, 0); 43 + if (start_bit >= fifo->limit) 44 + return -EOVERFLOW; 45 + 46 + bitmap_set(fifo->bitmap, start_bit, num_bits); 47 + mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT; 48 + mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit; 49 + 50 + dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n", 51 + __func__, mep->fifo_seg_size, mep->fifo_size, start_bit); 52 + 53 + return mep->fifo_addr; 54 + } 55 + 56 + static void ep_fifo_free(struct mtu3_ep *mep) 57 + { 58 + struct mtu3_fifo_info *fifo = mep->fifo; 59 + u32 addr = mep->fifo_addr; 60 + u32 bits = mep->fifo_size / MTU3_EP_FIFO_UNIT; 61 + u32 start_bit; 62 + 63 + if (unlikely(addr < fifo->base || bits > fifo->limit)) 64 + return; 65 + 66 + start_bit = (addr - fifo->base) / MTU3_EP_FIFO_UNIT; 67 + bitmap_clear(fifo->bitmap, start_bit, bits); 68 + mep->fifo_size = 0; 69 + mep->fifo_seg_size = 0; 70 + 71 + dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n", 72 + __func__, mep->fifo_seg_size, mep->fifo_size, start_bit); 73 + } 74 + 75 + /* set/clear U3D HS device soft connect */ 76 + void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable) 77 + { 78 + if (enable) { 79 + mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, 80 + SOFT_CONN | SUSPENDM_ENABLE); 81 + } else { 82 + mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, 83 + SOFT_CONN | SUSPENDM_ENABLE); 84 + } 85 + dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable); 86 + } 87 + 88 + /* only port0 of U2/U3 supports device mode */ 89 + static int mtu3_device_enable(struct mtu3 *mtu) 90 + { 91 + void __iomem *ibase = mtu->ippc_base; 92 + u32 check_clk = 0; 93 + 94 + mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); 95 + mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), 96 + (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | 97 + SSUSB_U2_PORT_HOST_SEL)); 98 + mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); 99 + 100 + return ssusb_check_clocks(mtu, check_clk); 101 + } 102 + 103 + static void mtu3_device_disable(struct mtu3 *mtu) 104 + { 105 + void __iomem *ibase = mtu->ippc_base; 106 + 107 + mtu3_setbits(ibase, SSUSB_U2_CTRL(0), 108 + SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN); 109 + mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); 110 + mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); 111 + } 112 + 113 + /* reset U3D's device module. */ 114 + static void mtu3_device_reset(struct mtu3 *mtu) 115 + { 116 + void __iomem *ibase = mtu->ippc_base; 117 + 118 + mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST); 119 + udelay(1); 120 + mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST); 121 + } 122 + 123 + /* disable all interrupts */ 124 + static void mtu3_intr_disable(struct mtu3 *mtu) 125 + { 126 + void __iomem *mbase = mtu->mac_base; 127 + 128 + /* Disable level 1 interrupts */ 129 + mtu3_writel(mbase, U3D_LV1IECR, ~0x0); 130 + 131 + /* Disable endpoint interrupts */ 132 + mtu3_writel(mbase, U3D_EPIECR, ~0x0); 133 + } 134 + 135 + static void mtu3_intr_status_clear(struct mtu3 *mtu) 136 + { 137 + void __iomem *mbase = mtu->mac_base; 138 + 139 + /* Clear EP0 and Tx/Rx EPn interrupts status */ 140 + mtu3_writel(mbase, U3D_EPISR, ~0x0); 141 + 142 + /* Clear U2 USB common interrupts status */ 143 + mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0); 144 + 145 + /* Clear speed change interrupt status */ 146 + mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0); 147 + } 148 + 149 + /* enable system global interrupt */ 150 + static void mtu3_intr_enable(struct mtu3 *mtu) 151 + { 152 + void __iomem *mbase = mtu->mac_base; 153 + u32 value; 154 + 155 + /*Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */ 156 + value = BMU_INTR | QMU_INTR | MAC2_INTR | EP_CTRL_INTR; 157 + mtu3_writel(mbase, U3D_LV1IESR, value); 158 + 159 + /* Enable U2 common USB interrupts */ 160 + value = SUSPEND_INTR | RESUME_INTR | RESET_INTR; 161 + mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value); 162 + 163 + /* Enable QMU interrupts. */ 164 + value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT | 165 + RXQ_LENERR_INT | RXQ_ZLPERR_INT; 166 + mtu3_writel(mbase, U3D_QIESR1, value); 167 + 168 + /* Enable speed change interrupt */ 169 + mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR); 170 + } 171 + 172 + /* set/clear the stall and toggle bits for non-ep0 */ 173 + void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set) 174 + { 175 + struct mtu3 *mtu = mep->mtu; 176 + void __iomem *mbase = mtu->mac_base; 177 + u8 epnum = mep->epnum; 178 + u32 csr; 179 + 180 + if (mep->is_in) { /* TX */ 181 + csr = mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS; 182 + if (set) 183 + csr |= TX_SENDSTALL; 184 + else 185 + csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL; 186 + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr); 187 + } else { /* RX */ 188 + csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS; 189 + if (set) 190 + csr |= RX_SENDSTALL; 191 + else 192 + csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL; 193 + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr); 194 + } 195 + 196 + if (!set) { 197 + mtu3_setbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum)); 198 + mtu3_clrbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum)); 199 + mep->flags &= ~MTU3_EP_STALL; 200 + } else { 201 + mep->flags |= MTU3_EP_STALL; 202 + } 203 + 204 + dev_dbg(mtu->dev, "%s: %s\n", mep->name, 205 + set ? "SEND STALL" : "CLEAR STALL, with EP RESET"); 206 + } 207 + 208 + void mtu3_start(struct mtu3 *mtu) 209 + { 210 + void __iomem *mbase = mtu->mac_base; 211 + 212 + dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__, 213 + mtu3_readl(mbase, U3D_DEVICE_CONTROL)); 214 + 215 + mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); 216 + 217 + /* Initialize the default interrupts */ 218 + mtu3_intr_enable(mtu); 219 + 220 + mtu->is_active = 1; 221 + 222 + if (mtu->softconnect) 223 + mtu3_hs_softconn_set(mtu, 1); 224 + } 225 + 226 + void mtu3_stop(struct mtu3 *mtu) 227 + { 228 + dev_dbg(mtu->dev, "%s\n", __func__); 229 + 230 + mtu3_intr_disable(mtu); 231 + mtu3_intr_status_clear(mtu); 232 + 233 + if (mtu->softconnect) 234 + mtu3_hs_softconn_set(mtu, 0); 235 + 236 + mtu->is_active = 0; 237 + mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); 238 + } 239 + 240 + /* for non-ep0 */ 241 + int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep, 242 + int interval, int burst, int mult) 243 + { 244 + void __iomem *mbase = mtu->mac_base; 245 + int epnum = mep->epnum; 246 + u32 csr0, csr1, csr2; 247 + int fifo_sgsz, fifo_addr; 248 + int num_pkts; 249 + 250 + fifo_addr = ep_fifo_alloc(mep, mep->maxp); 251 + if (fifo_addr < 0) { 252 + dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp); 253 + return -ENOMEM; 254 + } 255 + fifo_sgsz = ilog2(mep->fifo_seg_size); 256 + dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz, 257 + mep->fifo_seg_size, mep->fifo_size); 258 + 259 + if (mep->is_in) { 260 + csr0 = TX_TXMAXPKTSZ(mep->maxp); 261 + csr0 |= TX_DMAREQEN; 262 + 263 + num_pkts = (burst + 1) * (mult + 1) - 1; 264 + csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot); 265 + csr1 |= TX_MAX_PKT(num_pkts) | TX_MULT(mult); 266 + 267 + csr2 = TX_FIFOADDR(fifo_addr >> 4); 268 + csr2 |= TX_FIFOSEGSIZE(fifo_sgsz); 269 + 270 + switch (mep->type) { 271 + case USB_ENDPOINT_XFER_BULK: 272 + csr1 |= TX_TYPE(TYPE_BULK); 273 + break; 274 + case USB_ENDPOINT_XFER_ISOC: 275 + csr1 |= TX_TYPE(TYPE_ISO); 276 + csr2 |= TX_BINTERVAL(interval); 277 + break; 278 + case USB_ENDPOINT_XFER_INT: 279 + csr1 |= TX_TYPE(TYPE_INT); 280 + csr2 |= TX_BINTERVAL(interval); 281 + break; 282 + } 283 + 284 + /* Enable QMU Done interrupt */ 285 + mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum)); 286 + 287 + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0); 288 + mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1); 289 + mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2); 290 + 291 + dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n", 292 + epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)), 293 + mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)), 294 + mtu3_readl(mbase, MU3D_EP_TXCR2(epnum))); 295 + } else { 296 + csr0 = RX_RXMAXPKTSZ(mep->maxp); 297 + csr0 |= RX_DMAREQEN; 298 + 299 + num_pkts = (burst + 1) * (mult + 1) - 1; 300 + csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot); 301 + csr1 |= RX_MAX_PKT(num_pkts) | RX_MULT(mult); 302 + 303 + csr2 = RX_FIFOADDR(fifo_addr >> 4); 304 + csr2 |= RX_FIFOSEGSIZE(fifo_sgsz); 305 + 306 + switch (mep->type) { 307 + case USB_ENDPOINT_XFER_BULK: 308 + csr1 |= RX_TYPE(TYPE_BULK); 309 + break; 310 + case USB_ENDPOINT_XFER_ISOC: 311 + csr1 |= RX_TYPE(TYPE_ISO); 312 + csr2 |= RX_BINTERVAL(interval); 313 + break; 314 + case USB_ENDPOINT_XFER_INT: 315 + csr1 |= RX_TYPE(TYPE_INT); 316 + csr2 |= RX_BINTERVAL(interval); 317 + break; 318 + } 319 + 320 + /*Enable QMU Done interrupt */ 321 + mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum)); 322 + 323 + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0); 324 + mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1); 325 + mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2); 326 + 327 + dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n", 328 + epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)), 329 + mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)), 330 + mtu3_readl(mbase, MU3D_EP_RXCR2(epnum))); 331 + } 332 + 333 + dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2); 334 + dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n", 335 + __func__, mep->name, mep->fifo_addr, mep->fifo_size, 336 + fifo_sgsz, mep->fifo_seg_size); 337 + 338 + return 0; 339 + } 340 + 341 + /* for non-ep0 */ 342 + void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep) 343 + { 344 + void __iomem *mbase = mtu->mac_base; 345 + int epnum = mep->epnum; 346 + 347 + if (mep->is_in) { 348 + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0); 349 + mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0); 350 + mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0); 351 + mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum)); 352 + } else { 353 + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0); 354 + mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0); 355 + mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0); 356 + mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum)); 357 + } 358 + 359 + ep_fifo_free(mep); 360 + 361 + dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name); 362 + } 363 + 364 + /* 365 + * 1. when supports only HS, the fifo is shared for all EPs, and 366 + * the capability registers of @EPNTXFFSZ or @EPNRXFFSZ indicate 367 + * the total fifo size of non-ep0, and ep0's is fixed to 64B, 368 + * so the total fifo size is 64B + @EPNTXFFSZ; 369 + * Due to the first 64B should be reserved for EP0, non-ep0's fifo 370 + * starts from offset 64 and are divided into two equal parts for 371 + * TX or RX EPs for simplification. 372 + */ 373 + static void get_ep_fifo_config(struct mtu3 *mtu) 374 + { 375 + struct mtu3_fifo_info *tx_fifo; 376 + struct mtu3_fifo_info *rx_fifo; 377 + u32 fifosize; 378 + 379 + fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ); 380 + tx_fifo = &mtu->tx_fifo; 381 + tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE; 382 + tx_fifo->limit = (fifosize / MTU3_EP_FIFO_UNIT) >> 1; 383 + bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); 384 + 385 + rx_fifo = &mtu->rx_fifo; 386 + rx_fifo->base = 387 + tx_fifo->base + tx_fifo->limit * MTU3_EP_FIFO_UNIT; 388 + rx_fifo->limit = tx_fifo->limit; 389 + bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); 390 + mtu->slot = MTU3_U2_IP_SLOT_DEFAULT; 391 + 392 + dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n", 393 + __func__, tx_fifo->base, tx_fifo->limit, 394 + rx_fifo->base, rx_fifo->limit); 395 + } 396 + 397 + void mtu3_ep0_setup(struct mtu3 *mtu) 398 + { 399 + u32 maxpacket = mtu->g.ep0->maxpacket; 400 + u32 csr; 401 + 402 + dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket); 403 + 404 + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR); 405 + csr &= ~EP0_MAXPKTSZ_MSK; 406 + csr |= EP0_MAXPKTSZ(maxpacket); 407 + csr &= EP0_W1C_BITS; 408 + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr); 409 + 410 + /* Enable EP0 interrupt */ 411 + mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR); 412 + } 413 + 414 + static int mtu3_mem_alloc(struct mtu3 *mtu) 415 + { 416 + void __iomem *mbase = mtu->mac_base; 417 + struct mtu3_ep *ep_array; 418 + int in_ep_num, out_ep_num; 419 + u32 cap_epinfo; 420 + int ret; 421 + int i; 422 + 423 + mtu->hw_version = mtu3_readl(mtu->ippc_base, U3D_SSUSB_HW_ID); 424 + 425 + cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO); 426 + in_ep_num = CAP_TX_EP_NUM(cap_epinfo); 427 + out_ep_num = CAP_RX_EP_NUM(cap_epinfo); 428 + 429 + dev_info(mtu->dev, "IP version 0x%x\n", mtu->hw_version); 430 + dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n", 431 + mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num, 432 + mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num); 433 + 434 + /* one for ep0, another is reserved */ 435 + mtu->num_eps = min(in_ep_num, out_ep_num) + 1; 436 + ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL); 437 + if (ep_array == NULL) 438 + return -ENOMEM; 439 + 440 + mtu->ep_array = ep_array; 441 + mtu->in_eps = ep_array; 442 + mtu->out_eps = &ep_array[mtu->num_eps]; 443 + /* ep0 uses in_eps[0], out_eps[0] is reserved */ 444 + mtu->ep0 = mtu->in_eps; 445 + 446 + mtu->ep0->mtu = mtu; 447 + mtu->ep0->epnum = 0; 448 + 449 + for (i = 1; i < mtu->num_eps; i++) { 450 + struct mtu3_ep *mep = mtu->in_eps + i; 451 + 452 + mep->fifo = &mtu->tx_fifo; 453 + mep = mtu->out_eps + i; 454 + mep->fifo = &mtu->rx_fifo; 455 + } 456 + 457 + get_ep_fifo_config(mtu); 458 + 459 + ret = mtu3_qmu_init(mtu); 460 + if (ret) 461 + kfree(mtu->ep_array); 462 + 463 + return ret; 464 + } 465 + 466 + static void mtu3_mem_free(struct mtu3 *mtu) 467 + { 468 + mtu3_qmu_exit(mtu); 469 + kfree(mtu->ep_array); 470 + } 471 + 472 + static void mtu3_regs_init(struct mtu3 *mtu) 473 + { 474 + 475 + void __iomem *mbase = mtu->mac_base; 476 + 477 + /* be sure interrupts are disabled before registration of ISR */ 478 + mtu3_intr_disable(mtu); 479 + mtu3_intr_status_clear(mtu); 480 + 481 + mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN); 482 + /* HS/FS detected by HW */ 483 + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE); 484 + 485 + /* delay about 0.1us from detecting reset to send chirp-K */ 486 + mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK); 487 + 488 + /* U2/U3 detected by HW */ 489 + mtu3_writel(mbase, U3D_DEVICE_CONF, 0); 490 + 491 + /* enable QMU 16B checksum */ 492 + mtu3_setbits(mbase, U3D_QCR0, QMU_CS16B_EN); 493 + 494 + /* vbus detected by HW */ 495 + mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON); 496 + } 497 + 498 + static irqreturn_t mtu3_link_isr(struct mtu3 *mtu) 499 + { 500 + void __iomem *mbase = mtu->mac_base; 501 + enum usb_device_speed udev_speed; 502 + u32 maxpkt = 64; 503 + u32 link; 504 + u32 speed; 505 + 506 + link = mtu3_readl(mbase, U3D_DEV_LINK_INTR); 507 + link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE); 508 + mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */ 509 + dev_dbg(mtu->dev, "=== LINK[%x] ===\n", link); 510 + 511 + if (!(link & SSUSB_DEV_SPEED_CHG_INTR)) 512 + return IRQ_NONE; 513 + 514 + speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF)); 515 + 516 + switch (speed) { 517 + case MTU3_SPEED_FULL: 518 + udev_speed = USB_SPEED_FULL; 519 + /*BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */ 520 + mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf) 521 + | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa)); 522 + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, 523 + LPM_BESL_STALL | LPM_BESLD_STALL); 524 + break; 525 + case MTU3_SPEED_HIGH: 526 + udev_speed = USB_SPEED_HIGH; 527 + /*BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */ 528 + mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf) 529 + | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa)); 530 + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, 531 + LPM_BESL_STALL | LPM_BESLD_STALL); 532 + break; 533 + default: 534 + udev_speed = USB_SPEED_UNKNOWN; 535 + break; 536 + } 537 + dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed)); 538 + 539 + mtu->g.speed = udev_speed; 540 + mtu->g.ep0->maxpacket = maxpkt; 541 + mtu->ep0_state = MU3D_EP0_STATE_SETUP; 542 + 543 + if (udev_speed == USB_SPEED_UNKNOWN) 544 + mtu3_gadget_disconnect(mtu); 545 + else 546 + mtu3_ep0_setup(mtu); 547 + 548 + return IRQ_HANDLED; 549 + } 550 + 551 + static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu) 552 + { 553 + void __iomem *mbase = mtu->mac_base; 554 + u32 u2comm; 555 + 556 + u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR); 557 + u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE); 558 + mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */ 559 + dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm); 560 + 561 + if (u2comm & SUSPEND_INTR) 562 + mtu3_gadget_suspend(mtu); 563 + 564 + if (u2comm & RESUME_INTR) 565 + mtu3_gadget_resume(mtu); 566 + 567 + if (u2comm & RESET_INTR) 568 + mtu3_gadget_reset(mtu); 569 + 570 + return IRQ_HANDLED; 571 + } 572 + 573 + irqreturn_t mtu3_irq(int irq, void *data) 574 + { 575 + struct mtu3 *mtu = (struct mtu3 *)data; 576 + unsigned long flags; 577 + u32 level1; 578 + 579 + spin_lock_irqsave(&mtu->lock, flags); 580 + 581 + /* U3D_LV1ISR is RU */ 582 + level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR); 583 + level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER); 584 + 585 + if (level1 & EP_CTRL_INTR) 586 + mtu3_link_isr(mtu); 587 + 588 + if (level1 & MAC2_INTR) 589 + mtu3_u2_common_isr(mtu); 590 + 591 + if (level1 & BMU_INTR) 592 + mtu3_ep0_isr(mtu); 593 + 594 + if (level1 & QMU_INTR) 595 + mtu3_qmu_isr(mtu); 596 + 597 + spin_unlock_irqrestore(&mtu->lock, flags); 598 + 599 + return IRQ_HANDLED; 600 + } 601 + 602 + static int mtu3_hw_init(struct mtu3 *mtu) 603 + { 604 + int ret; 605 + 606 + mtu3_device_reset(mtu); 607 + 608 + ret = mtu3_device_enable(mtu); 609 + if (ret) { 610 + dev_err(mtu->dev, "device enable failed %d\n", ret); 611 + return ret; 612 + } 613 + 614 + ret = mtu3_mem_alloc(mtu); 615 + if (ret) 616 + return -ENOMEM; 617 + 618 + mtu3_regs_init(mtu); 619 + 620 + return 0; 621 + } 622 + 623 + static void mtu3_hw_exit(struct mtu3 *mtu) 624 + { 625 + mtu3_device_disable(mtu); 626 + mtu3_mem_free(mtu); 627 + } 628 + 629 + /*-------------------------------------------------------------------------*/ 630 + 631 + int ssusb_gadget_init(struct mtu3 *mtu) 632 + { 633 + struct device *dev = mtu->dev; 634 + int ret; 635 + 636 + ret = mtu3_hw_init(mtu); 637 + if (ret) { 638 + dev_err(dev, "mtu3 hw init failed:%d\n", ret); 639 + return ret; 640 + } 641 + 642 + ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu); 643 + if (ret) { 644 + dev_err(dev, "request irq %d failed!\n", mtu->irq); 645 + goto irq_err; 646 + } 647 + 648 + device_init_wakeup(dev, true); 649 + 650 + ret = mtu3_gadget_setup(mtu); 651 + if (ret) { 652 + dev_err(dev, "mtu3 gadget init failed:%d\n", ret); 653 + goto gadget_err; 654 + } 655 + 656 + dev_dbg(dev, " %s() done...\n", __func__); 657 + 658 + return 0; 659 + 660 + gadget_err: 661 + device_init_wakeup(dev, false); 662 + 663 + irq_err: 664 + mtu3_hw_exit(mtu); 665 + dev_err(dev, " %s() fail...\n", __func__); 666 + 667 + return ret; 668 + } 669 + 670 + void ssusb_gadget_exit(struct mtu3 *mtu) 671 + { 672 + mtu3_gadget_cleanup(mtu); 673 + device_init_wakeup(mtu->dev, false); 674 + mtu3_hw_exit(mtu); 675 + }
+709
drivers/usb/mtu3/mtu3_gadget.c
··· 1 + /* 2 + * mtu3_gadget.c - MediaTek usb3 DRD peripheral support 3 + * 4 + * Copyright (C) 2016 MediaTek Inc. 5 + * 6 + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 7 + * 8 + * This software is licensed under the terms of the GNU General Public 9 + * License version 2, as published by the Free Software Foundation, and 10 + * may be copied, distributed, and modified under those terms. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + */ 18 + 19 + #include "mtu3.h" 20 + 21 + void mtu3_req_complete(struct mtu3_ep *mep, 22 + struct usb_request *req, int status) 23 + __releases(mep->mtu->lock) 24 + __acquires(mep->mtu->lock) 25 + { 26 + struct mtu3_request *mreq; 27 + struct mtu3 *mtu; 28 + int busy = mep->busy; 29 + 30 + mreq = to_mtu3_request(req); 31 + list_del(&mreq->list); 32 + if (mreq->request.status == -EINPROGRESS) 33 + mreq->request.status = status; 34 + 35 + mtu = mreq->mtu; 36 + mep->busy = 1; 37 + spin_unlock(&mtu->lock); 38 + 39 + /* ep0 makes use of PIO, needn't unmap it */ 40 + if (mep->epnum) 41 + usb_gadget_unmap_request(&mtu->g, req, mep->is_in); 42 + 43 + dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", mep->name, 44 + req, req->status, mreq->request.actual, mreq->request.length); 45 + 46 + usb_gadget_giveback_request(&mep->ep, &mreq->request); 47 + 48 + spin_lock(&mtu->lock); 49 + mep->busy = busy; 50 + } 51 + 52 + static void nuke(struct mtu3_ep *mep, const int status) 53 + { 54 + struct mtu3_request *mreq = NULL; 55 + 56 + mep->busy = 1; 57 + if (list_empty(&mep->req_list)) 58 + return; 59 + 60 + dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status); 61 + 62 + /* exclude EP0 */ 63 + if (mep->epnum) 64 + mtu3_qmu_flush(mep); 65 + 66 + while (!list_empty(&mep->req_list)) { 67 + mreq = list_first_entry(&mep->req_list, 68 + struct mtu3_request, list); 69 + mtu3_req_complete(mep, &mreq->request, status); 70 + } 71 + } 72 + 73 + static int mtu3_ep_enable(struct mtu3_ep *mep) 74 + { 75 + const struct usb_endpoint_descriptor *desc; 76 + struct mtu3 *mtu = mep->mtu; 77 + u32 interval = 0; 78 + u32 mult = 0; 79 + u32 burst = 0; 80 + int max_packet; 81 + int ret; 82 + 83 + desc = mep->desc; 84 + mep->type = usb_endpoint_type(desc); 85 + max_packet = usb_endpoint_maxp(desc); 86 + mep->maxp = max_packet & GENMASK(10, 0); 87 + 88 + switch (mtu->g.speed) { 89 + case USB_SPEED_HIGH: 90 + if (usb_endpoint_xfer_isoc(desc) || 91 + usb_endpoint_xfer_int(desc)) { 92 + interval = desc->bInterval; 93 + interval = clamp_val(interval, 1, 16) - 1; 94 + burst = (max_packet & GENMASK(12, 11)) >> 11; 95 + } 96 + break; 97 + default: 98 + break; /*others are ignored */ 99 + } 100 + 101 + dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n", 102 + __func__, mep->maxp, interval, burst, mult); 103 + 104 + mep->ep.maxpacket = mep->maxp; 105 + mep->ep.desc = desc; 106 + 107 + /* slot mainly affects bulk/isoc transfer, so ignore int */ 108 + mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot; 109 + 110 + ret = mtu3_config_ep(mtu, mep, interval, burst, mult); 111 + if (ret < 0) 112 + return ret; 113 + 114 + ret = mtu3_gpd_ring_alloc(mep); 115 + if (ret < 0) { 116 + mtu3_deconfig_ep(mtu, mep); 117 + return ret; 118 + } 119 + 120 + mtu3_qmu_start(mep); 121 + 122 + return 0; 123 + } 124 + 125 + static int mtu3_ep_disable(struct mtu3_ep *mep) 126 + { 127 + struct mtu3 *mtu = mep->mtu; 128 + 129 + mtu3_qmu_stop(mep); 130 + 131 + /* abort all pending requests */ 132 + nuke(mep, -ESHUTDOWN); 133 + mtu3_deconfig_ep(mtu, mep); 134 + mtu3_gpd_ring_free(mep); 135 + 136 + mep->desc = NULL; 137 + mep->ep.desc = NULL; 138 + mep->type = 0; 139 + mep->flags = 0; 140 + 141 + return 0; 142 + } 143 + 144 + static int mtu3_gadget_ep_enable(struct usb_ep *ep, 145 + const struct usb_endpoint_descriptor *desc) 146 + { 147 + struct mtu3_ep *mep; 148 + struct mtu3 *mtu; 149 + unsigned long flags; 150 + int ret = -EINVAL; 151 + 152 + if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 153 + pr_debug("%s invalid parameters\n", __func__); 154 + return -EINVAL; 155 + } 156 + 157 + if (!desc->wMaxPacketSize) { 158 + pr_debug("%s missing wMaxPacketSize\n", __func__); 159 + return -EINVAL; 160 + } 161 + mep = to_mtu3_ep(ep); 162 + mtu = mep->mtu; 163 + 164 + /* check ep number and direction against endpoint */ 165 + if (usb_endpoint_num(desc) != mep->epnum) 166 + return -EINVAL; 167 + 168 + if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in) 169 + return -EINVAL; 170 + 171 + dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name); 172 + 173 + if (mep->flags & MTU3_EP_ENABLED) { 174 + dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n", 175 + mep->name); 176 + return 0; 177 + } 178 + 179 + spin_lock_irqsave(&mtu->lock, flags); 180 + mep->desc = desc; 181 + 182 + ret = mtu3_ep_enable(mep); 183 + if (ret) 184 + goto error; 185 + 186 + mep->busy = 0; 187 + mep->wedged = 0; 188 + mep->flags |= MTU3_EP_ENABLED; 189 + mtu->active_ep++; 190 + 191 + error: 192 + spin_unlock_irqrestore(&mtu->lock, flags); 193 + 194 + dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep); 195 + 196 + return ret; 197 + } 198 + 199 + static int mtu3_gadget_ep_disable(struct usb_ep *ep) 200 + { 201 + struct mtu3_ep *mep = to_mtu3_ep(ep); 202 + struct mtu3 *mtu = mep->mtu; 203 + unsigned long flags; 204 + 205 + dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name); 206 + 207 + if (!(mep->flags & MTU3_EP_ENABLED)) { 208 + dev_warn(mtu->dev, "%s is already disabled\n", mep->name); 209 + return 0; 210 + } 211 + 212 + spin_lock_irqsave(&mtu->lock, flags); 213 + mtu3_ep_disable(mep); 214 + mep->flags &= ~MTU3_EP_ENABLED; 215 + mtu->active_ep--; 216 + spin_unlock_irqrestore(&(mtu->lock), flags); 217 + 218 + dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n", 219 + __func__, mtu->active_ep, mtu->is_active); 220 + 221 + return 0; 222 + } 223 + 224 + struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) 225 + { 226 + struct mtu3_ep *mep = to_mtu3_ep(ep); 227 + struct mtu3_request *mreq; 228 + 229 + mreq = kzalloc(sizeof(*mreq), gfp_flags); 230 + if (!mreq) 231 + return NULL; 232 + 233 + mreq->request.dma = DMA_ADDR_INVALID; 234 + mreq->epnum = mep->epnum; 235 + mreq->mep = mep; 236 + 237 + return &mreq->request; 238 + } 239 + 240 + void mtu3_free_request(struct usb_ep *ep, struct usb_request *req) 241 + { 242 + kfree(to_mtu3_request(req)); 243 + } 244 + 245 + static int mtu3_gadget_queue(struct usb_ep *ep, 246 + struct usb_request *req, gfp_t gfp_flags) 247 + { 248 + struct mtu3_ep *mep; 249 + struct mtu3_request *mreq; 250 + struct mtu3 *mtu; 251 + unsigned long flags; 252 + int ret = 0; 253 + 254 + if (!ep || !req) 255 + return -EINVAL; 256 + 257 + if (!req->buf) 258 + return -ENODATA; 259 + 260 + mep = to_mtu3_ep(ep); 261 + mtu = mep->mtu; 262 + mreq = to_mtu3_request(req); 263 + mreq->mtu = mtu; 264 + 265 + if (mreq->mep != mep) 266 + return -EINVAL; 267 + 268 + dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n", 269 + __func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name, 270 + mreq, ep->maxpacket, mreq->request.length); 271 + 272 + if (req->length > GPD_BUF_SIZE) { 273 + dev_warn(mtu->dev, 274 + "req length > supported MAX:%d requested:%d\n", 275 + GPD_BUF_SIZE, req->length); 276 + return -EOPNOTSUPP; 277 + } 278 + 279 + /* don't queue if the ep is down */ 280 + if (!mep->desc) { 281 + dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n", 282 + req, ep->name); 283 + return -ESHUTDOWN; 284 + } 285 + 286 + mreq->request.actual = 0; 287 + mreq->request.status = -EINPROGRESS; 288 + 289 + ret = usb_gadget_map_request(&mtu->g, req, mep->is_in); 290 + if (ret) { 291 + dev_err(mtu->dev, "dma mapping failed\n"); 292 + return ret; 293 + } 294 + 295 + spin_lock_irqsave(&mtu->lock, flags); 296 + 297 + if (mtu3_prepare_transfer(mep)) { 298 + ret = -EAGAIN; 299 + goto error; 300 + } 301 + 302 + list_add_tail(&mreq->list, &mep->req_list); 303 + mtu3_insert_gpd(mep, mreq); 304 + mtu3_qmu_resume(mep); 305 + 306 + error: 307 + spin_unlock_irqrestore(&mtu->lock, flags); 308 + 309 + return ret; 310 + } 311 + 312 + static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req) 313 + { 314 + struct mtu3_ep *mep = to_mtu3_ep(ep); 315 + struct mtu3_request *mreq = to_mtu3_request(req); 316 + struct mtu3_request *r; 317 + unsigned long flags; 318 + int ret = 0; 319 + struct mtu3 *mtu = mep->mtu; 320 + 321 + if (!ep || !req || mreq->mep != mep) 322 + return -EINVAL; 323 + 324 + dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req); 325 + 326 + spin_lock_irqsave(&mtu->lock, flags); 327 + 328 + list_for_each_entry(r, &mep->req_list, list) { 329 + if (r == mreq) 330 + break; 331 + } 332 + if (r != mreq) { 333 + dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name); 334 + ret = -EINVAL; 335 + goto done; 336 + } 337 + 338 + mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */ 339 + mtu3_req_complete(mep, req, -ECONNRESET); 340 + mtu3_qmu_start(mep); 341 + 342 + done: 343 + spin_unlock_irqrestore(&mtu->lock, flags); 344 + 345 + return ret; 346 + } 347 + 348 + /* 349 + * Set or clear the halt bit of an EP. 350 + * A halted EP won't TX/RX any data but will queue requests. 351 + */ 352 + static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value) 353 + { 354 + struct mtu3_ep *mep = to_mtu3_ep(ep); 355 + struct mtu3 *mtu = mep->mtu; 356 + struct mtu3_request *mreq; 357 + unsigned long flags; 358 + int ret = 0; 359 + 360 + if (!ep) 361 + return -EINVAL; 362 + 363 + dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name); 364 + 365 + spin_lock_irqsave(&mtu->lock, flags); 366 + 367 + if (mep->type == USB_ENDPOINT_XFER_ISOC) { 368 + ret = -EINVAL; 369 + goto done; 370 + } 371 + 372 + mreq = next_request(mep); 373 + if (value) { 374 + /* 375 + * If there is not request for TX-EP, QMU will not transfer 376 + * data to TX-FIFO, so no need check whether TX-FIFO 377 + * holds bytes or not here 378 + */ 379 + if (mreq) { 380 + dev_dbg(mtu->dev, "req in progress, cannot halt %s\n", 381 + ep->name); 382 + ret = -EAGAIN; 383 + goto done; 384 + } 385 + } else { 386 + mep->wedged = 0; 387 + } 388 + 389 + dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear"); 390 + 391 + mtu3_ep_stall_set(mep, value); 392 + 393 + done: 394 + spin_unlock_irqrestore(&mtu->lock, flags); 395 + 396 + return ret; 397 + } 398 + 399 + /* Sets the halt feature with the clear requests ignored */ 400 + static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep) 401 + { 402 + struct mtu3_ep *mep = to_mtu3_ep(ep); 403 + 404 + if (!ep) 405 + return -EINVAL; 406 + 407 + mep->wedged = 1; 408 + 409 + return usb_ep_set_halt(ep); 410 + } 411 + 412 + static const struct usb_ep_ops mtu3_ep_ops = { 413 + .enable = mtu3_gadget_ep_enable, 414 + .disable = mtu3_gadget_ep_disable, 415 + .alloc_request = mtu3_alloc_request, 416 + .free_request = mtu3_free_request, 417 + .queue = mtu3_gadget_queue, 418 + .dequeue = mtu3_gadget_dequeue, 419 + .set_halt = mtu3_gadget_ep_set_halt, 420 + .set_wedge = mtu3_gadget_ep_set_wedge, 421 + }; 422 + 423 + static int mtu3_gadget_get_frame(struct usb_gadget *gadget) 424 + { 425 + struct mtu3 *mtu = gadget_to_mtu3(gadget); 426 + 427 + return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM); 428 + } 429 + 430 + static int mtu3_gadget_wakeup(struct usb_gadget *gadget) 431 + { 432 + struct mtu3 *mtu = gadget_to_mtu3(gadget); 433 + unsigned long flags; 434 + 435 + dev_dbg(mtu->dev, "%s\n", __func__); 436 + 437 + /* remote wakeup feature is not enabled by host */ 438 + if (!mtu->may_wakeup) 439 + return -EOPNOTSUPP; 440 + 441 + spin_lock_irqsave(&mtu->lock, flags); 442 + 443 + mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); 444 + spin_unlock_irqrestore(&mtu->lock, flags); 445 + usleep_range(10000, 11000); 446 + spin_lock_irqsave(&mtu->lock, flags); 447 + mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); 448 + 449 + spin_unlock_irqrestore(&mtu->lock, flags); 450 + return 0; 451 + } 452 + 453 + static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget, 454 + int is_selfpowered) 455 + { 456 + struct mtu3 *mtu = gadget_to_mtu3(gadget); 457 + 458 + mtu->is_self_powered = !!is_selfpowered; 459 + return 0; 460 + } 461 + 462 + static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on) 463 + { 464 + struct mtu3 *mtu = gadget_to_mtu3(gadget); 465 + unsigned long flags; 466 + 467 + dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__, 468 + is_on ? "on" : "off", mtu->is_active ? "" : "in"); 469 + 470 + /* we'd rather not pullup unless the device is active. */ 471 + spin_lock_irqsave(&mtu->lock, flags); 472 + 473 + is_on = !!is_on; 474 + if (!mtu->is_active) { 475 + /* save it for mtu3_start() to process the request */ 476 + mtu->softconnect = is_on; 477 + } else if (is_on != mtu->softconnect) { 478 + mtu->softconnect = is_on; 479 + mtu3_hs_softconn_set(mtu, is_on); 480 + } 481 + 482 + spin_unlock_irqrestore(&mtu->lock, flags); 483 + 484 + return 0; 485 + } 486 + 487 + static int mtu3_gadget_start(struct usb_gadget *gadget, 488 + struct usb_gadget_driver *driver) 489 + { 490 + struct mtu3 *mtu = gadget_to_mtu3(gadget); 491 + unsigned long flags; 492 + 493 + if (mtu->gadget_driver) { 494 + dev_err(mtu->dev, "%s is already bound to %s\n", 495 + mtu->g.name, mtu->gadget_driver->driver.name); 496 + return -EBUSY; 497 + } 498 + 499 + dev_dbg(mtu->dev, "bind driver %s\n", driver->function); 500 + 501 + spin_lock_irqsave(&mtu->lock, flags); 502 + 503 + mtu->softconnect = 0; 504 + mtu->gadget_driver = driver; 505 + 506 + mtu3_start(mtu); 507 + 508 + spin_unlock_irqrestore(&mtu->lock, flags); 509 + 510 + return 0; 511 + } 512 + 513 + static void stop_activity(struct mtu3 *mtu) 514 + { 515 + struct usb_gadget_driver *driver = mtu->gadget_driver; 516 + int i; 517 + 518 + /* don't disconnect if it's not connected */ 519 + if (mtu->g.speed == USB_SPEED_UNKNOWN) 520 + driver = NULL; 521 + else 522 + mtu->g.speed = USB_SPEED_UNKNOWN; 523 + 524 + /* deactivate the hardware */ 525 + if (mtu->softconnect) { 526 + mtu->softconnect = 0; 527 + mtu3_hs_softconn_set(mtu, 0); 528 + } 529 + 530 + /* 531 + * killing any outstanding requests will quiesce the driver; 532 + * then report disconnect 533 + */ 534 + nuke(mtu->ep0, -ESHUTDOWN); 535 + for (i = 1; i < mtu->num_eps; i++) { 536 + nuke(mtu->in_eps + i, -ESHUTDOWN); 537 + nuke(mtu->out_eps + i, -ESHUTDOWN); 538 + } 539 + 540 + if (driver) { 541 + spin_unlock(&mtu->lock); 542 + driver->disconnect(&mtu->g); 543 + spin_lock(&mtu->lock); 544 + } 545 + } 546 + 547 + static int mtu3_gadget_stop(struct usb_gadget *g) 548 + { 549 + struct mtu3 *mtu = gadget_to_mtu3(g); 550 + unsigned long flags; 551 + 552 + dev_dbg(mtu->dev, "%s\n", __func__); 553 + 554 + spin_lock_irqsave(&mtu->lock, flags); 555 + 556 + stop_activity(mtu); 557 + mtu->gadget_driver = NULL; 558 + 559 + mtu3_stop(mtu); 560 + 561 + spin_unlock_irqrestore(&mtu->lock, flags); 562 + 563 + return 0; 564 + } 565 + 566 + static const struct usb_gadget_ops mtu3_gadget_ops = { 567 + .get_frame = mtu3_gadget_get_frame, 568 + .wakeup = mtu3_gadget_wakeup, 569 + .set_selfpowered = mtu3_gadget_set_self_powered, 570 + .pullup = mtu3_gadget_pullup, 571 + .udc_start = mtu3_gadget_start, 572 + .udc_stop = mtu3_gadget_stop, 573 + }; 574 + 575 + static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep, 576 + u32 epnum, u32 is_in) 577 + { 578 + mep->epnum = epnum; 579 + mep->mtu = mtu; 580 + mep->is_in = is_in; 581 + 582 + INIT_LIST_HEAD(&mep->req_list); 583 + 584 + sprintf(mep->name, "ep%d%s", epnum, 585 + !epnum ? "" : (is_in ? "in" : "out")); 586 + 587 + mep->ep.name = mep->name; 588 + INIT_LIST_HEAD(&mep->ep.ep_list); 589 + 590 + /* initialize maxpacket as HS */ 591 + if (!epnum) { 592 + usb_ep_set_maxpacket_limit(&mep->ep, 64); 593 + mep->ep.caps.type_control = true; 594 + mep->ep.ops = &mtu3_ep0_ops; 595 + mtu->g.ep0 = &mep->ep; 596 + } else { 597 + usb_ep_set_maxpacket_limit(&mep->ep, 512); 598 + mep->ep.caps.type_iso = true; 599 + mep->ep.caps.type_bulk = true; 600 + mep->ep.caps.type_int = true; 601 + mep->ep.ops = &mtu3_ep_ops; 602 + list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list); 603 + } 604 + 605 + dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name, 606 + mep->ep.maxpacket); 607 + 608 + if (!epnum) { 609 + mep->ep.caps.dir_in = true; 610 + mep->ep.caps.dir_out = true; 611 + } else if (is_in) { 612 + mep->ep.caps.dir_in = true; 613 + } else { 614 + mep->ep.caps.dir_out = true; 615 + } 616 + } 617 + 618 + static void mtu3_gadget_init_eps(struct mtu3 *mtu) 619 + { 620 + u8 epnum; 621 + 622 + /* initialize endpoint list just once */ 623 + INIT_LIST_HEAD(&(mtu->g.ep_list)); 624 + 625 + dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n", 626 + __func__, mtu->num_eps); 627 + 628 + init_hw_ep(mtu, mtu->ep0, 0, 0); 629 + for (epnum = 1; epnum < mtu->num_eps; epnum++) { 630 + init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1); 631 + init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0); 632 + } 633 + } 634 + 635 + int mtu3_gadget_setup(struct mtu3 *mtu) 636 + { 637 + int ret; 638 + 639 + mtu->g.ops = &mtu3_gadget_ops; 640 + mtu->g.max_speed = USB_SPEED_HIGH; 641 + mtu->g.speed = USB_SPEED_UNKNOWN; 642 + mtu->g.sg_supported = 0; 643 + mtu->g.name = MTU3_DRIVER_NAME; 644 + mtu->is_active = 0; 645 + 646 + mtu3_gadget_init_eps(mtu); 647 + 648 + ret = usb_add_gadget_udc(mtu->dev, &mtu->g); 649 + if (ret) { 650 + dev_err(mtu->dev, "failed to register udc\n"); 651 + return ret; 652 + } 653 + 654 + usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); 655 + 656 + return 0; 657 + } 658 + 659 + void mtu3_gadget_cleanup(struct mtu3 *mtu) 660 + { 661 + usb_del_gadget_udc(&mtu->g); 662 + } 663 + 664 + void mtu3_gadget_resume(struct mtu3 *mtu) 665 + { 666 + dev_dbg(mtu->dev, "gadget RESUME\n"); 667 + if (mtu->gadget_driver && mtu->gadget_driver->resume) { 668 + spin_unlock(&mtu->lock); 669 + mtu->gadget_driver->resume(&mtu->g); 670 + spin_lock(&mtu->lock); 671 + } 672 + } 673 + 674 + /* called when SOF packets stop for 3+ msec or enters U3 */ 675 + void mtu3_gadget_suspend(struct mtu3 *mtu) 676 + { 677 + dev_dbg(mtu->dev, "gadget SUSPEND\n"); 678 + if (mtu->gadget_driver && mtu->gadget_driver->suspend) { 679 + spin_unlock(&mtu->lock); 680 + mtu->gadget_driver->suspend(&mtu->g); 681 + spin_lock(&mtu->lock); 682 + } 683 + } 684 + 685 + /* called when VBUS drops below session threshold, and in other cases */ 686 + void mtu3_gadget_disconnect(struct mtu3 *mtu) 687 + { 688 + dev_dbg(mtu->dev, "gadget DISCONNECT\n"); 689 + if (mtu->gadget_driver && mtu->gadget_driver->disconnect) { 690 + spin_unlock(&mtu->lock); 691 + mtu->gadget_driver->disconnect(&mtu->g); 692 + spin_lock(&mtu->lock); 693 + } 694 + 695 + usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); 696 + } 697 + 698 + void mtu3_gadget_reset(struct mtu3 *mtu) 699 + { 700 + dev_dbg(mtu->dev, "gadget RESET\n"); 701 + 702 + /* report disconnect, if we didn't flush EP state */ 703 + if (mtu->g.speed != USB_SPEED_UNKNOWN) 704 + mtu3_gadget_disconnect(mtu); 705 + 706 + mtu->address = 0; 707 + mtu->ep0_state = MU3D_EP0_STATE_SETUP; 708 + mtu->may_wakeup = 0; 709 + }
+791
drivers/usb/mtu3/mtu3_gadget_ep0.c
··· 1 + /* 2 + * mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling 3 + * 4 + * Copyright (c) 2016 MediaTek Inc. 5 + * 6 + * Author: Chunfeng.Yun <chunfeng.yun@mediatek.com> 7 + * 8 + * This software is licensed under the terms of the GNU General Public 9 + * License version 2, as published by the Free Software Foundation, and 10 + * may be copied, distributed, and modified under those terms. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + */ 18 + 19 + #include "mtu3.h" 20 + 21 + /* ep0 is always mtu3->in_eps[0] */ 22 + #define next_ep0_request(mtu) next_request((mtu)->ep0) 23 + 24 + /* for high speed test mode; see USB 2.0 spec 7.1.20 */ 25 + static const u8 mtu3_test_packet[53] = { 26 + /* implicit SYNC then DATA0 to start */ 27 + 28 + /* JKJKJKJK x9 */ 29 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 30 + /* JJKKJJKK x8 */ 31 + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 32 + /* JJJJKKKK x8 */ 33 + 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 34 + /* JJJJJJJKKKKKKK x8 */ 35 + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 36 + /* JJJJJJJK x8 */ 37 + 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 38 + /* JKKKKKKK x10, JK */ 39 + 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e, 40 + /* implicit CRC16 then EOP to end */ 41 + }; 42 + 43 + static char *decode_ep0_state(struct mtu3 *mtu) 44 + { 45 + switch (mtu->ep0_state) { 46 + case MU3D_EP0_STATE_SETUP: 47 + return "SETUP"; 48 + case MU3D_EP0_STATE_TX: 49 + return "IN"; 50 + case MU3D_EP0_STATE_RX: 51 + return "OUT"; 52 + case MU3D_EP0_STATE_TX_END: 53 + return "TX-END"; 54 + case MU3D_EP0_STATE_STALL: 55 + return "STALL"; 56 + default: 57 + return "??"; 58 + } 59 + } 60 + 61 + static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req) 62 + { 63 + mtu3_req_complete(mtu->ep0, req, 0); 64 + } 65 + 66 + static int 67 + forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) 68 + __releases(mtu->lock) 69 + __acquires(mtu->lock) 70 + { 71 + int ret; 72 + 73 + if (!mtu->gadget_driver) 74 + return -EOPNOTSUPP; 75 + 76 + spin_unlock(&mtu->lock); 77 + ret = mtu->gadget_driver->setup(&mtu->g, setup); 78 + spin_lock(&mtu->lock); 79 + 80 + dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret); 81 + return ret; 82 + } 83 + 84 + static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len) 85 + { 86 + void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0; 87 + u16 index = 0; 88 + 89 + dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n", 90 + __func__, mep->epnum, len, src); 91 + 92 + if (len >= 4) { 93 + iowrite32_rep(fifo, src, len >> 2); 94 + index = len & ~0x03; 95 + } 96 + if (len & 0x02) { 97 + writew(*(u16 *)&src[index], fifo); 98 + index += 2; 99 + } 100 + if (len & 0x01) 101 + writeb(src[index], fifo); 102 + } 103 + 104 + static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len) 105 + { 106 + void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0; 107 + u32 value; 108 + u16 index = 0; 109 + 110 + dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n", 111 + __func__, mep->epnum, len, dst); 112 + 113 + if (len >= 4) { 114 + ioread32_rep(fifo, dst, len >> 2); 115 + index = len & ~0x03; 116 + } 117 + if (len & 0x3) { 118 + value = readl(fifo); 119 + memcpy(&dst[index], &value, len & 0x3); 120 + } 121 + 122 + } 123 + 124 + static void ep0_load_test_packet(struct mtu3 *mtu) 125 + { 126 + /* 127 + * because the length of test packet is less than max packet of HS ep0, 128 + * write it into fifo directly. 129 + */ 130 + ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet)); 131 + } 132 + 133 + /* 134 + * A. send STALL for setup transfer without data stage: 135 + * set SENDSTALL and SETUPPKTRDY at the same time; 136 + * B. send STALL for other cases: 137 + * set SENDSTALL only. 138 + */ 139 + static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy) 140 + { 141 + struct mtu3 *mtu = mep0->mtu; 142 + void __iomem *mbase = mtu->mac_base; 143 + u32 csr; 144 + 145 + /* EP0_SENTSTALL is W1C */ 146 + csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS; 147 + if (set) 148 + csr |= EP0_SENDSTALL | pktrdy; 149 + else 150 + csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL; 151 + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr); 152 + 153 + mtu->ep0_state = MU3D_EP0_STATE_SETUP; 154 + 155 + dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n", 156 + set ? "SEND" : "CLEAR", decode_ep0_state(mtu)); 157 + } 158 + 159 + static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq); 160 + 161 + static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req) 162 + {} 163 + 164 + static int 165 + ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) 166 + { 167 + struct mtu3_ep *mep = NULL; 168 + int handled = 1; 169 + u8 result[2] = {0, 0}; 170 + u8 epnum = 0; 171 + int is_in; 172 + 173 + switch (setup->bRequestType & USB_RECIP_MASK) { 174 + case USB_RECIP_DEVICE: 175 + result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED; 176 + result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; 177 + break; 178 + case USB_RECIP_INTERFACE: 179 + break; 180 + case USB_RECIP_ENDPOINT: 181 + epnum = (u8) le16_to_cpu(setup->wIndex); 182 + is_in = epnum & USB_DIR_IN; 183 + epnum &= USB_ENDPOINT_NUMBER_MASK; 184 + 185 + if (epnum >= mtu->num_eps) { 186 + handled = -EINVAL; 187 + break; 188 + } 189 + if (!epnum) 190 + break; 191 + 192 + mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum; 193 + if (!mep->desc) { 194 + handled = -EINVAL; 195 + break; 196 + } 197 + if (mep->flags & MTU3_EP_STALL) 198 + result[0] |= 1 << USB_ENDPOINT_HALT; 199 + 200 + break; 201 + default: 202 + /* class, vendor, etc ... delegate */ 203 + handled = 0; 204 + break; 205 + } 206 + 207 + if (handled > 0) { 208 + int ret; 209 + 210 + /* prepare a data stage for GET_STATUS */ 211 + dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result); 212 + memcpy(mtu->setup_buf, result, sizeof(result)); 213 + mtu->ep0_req.mep = mtu->ep0; 214 + mtu->ep0_req.request.length = 2; 215 + mtu->ep0_req.request.buf = &mtu->setup_buf; 216 + mtu->ep0_req.request.complete = ep0_dummy_complete; 217 + ret = ep0_queue(mtu->ep0, &mtu->ep0_req); 218 + if (ret < 0) 219 + handled = ret; 220 + } 221 + return handled; 222 + } 223 + 224 + static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup) 225 + { 226 + void __iomem *mbase = mtu->mac_base; 227 + int handled = 1; 228 + 229 + switch (le16_to_cpu(setup->wIndex) >> 8) { 230 + case TEST_J: 231 + dev_dbg(mtu->dev, "TEST_J\n"); 232 + mtu->test_mode_nr = TEST_J_MODE; 233 + break; 234 + case TEST_K: 235 + dev_dbg(mtu->dev, "TEST_K\n"); 236 + mtu->test_mode_nr = TEST_K_MODE; 237 + break; 238 + case TEST_SE0_NAK: 239 + dev_dbg(mtu->dev, "TEST_SE0_NAK\n"); 240 + mtu->test_mode_nr = TEST_SE0_NAK_MODE; 241 + break; 242 + case TEST_PACKET: 243 + dev_dbg(mtu->dev, "TEST_PACKET\n"); 244 + mtu->test_mode_nr = TEST_PACKET_MODE; 245 + break; 246 + default: 247 + handled = -EINVAL; 248 + goto out; 249 + } 250 + 251 + mtu->test_mode = true; 252 + 253 + /* no TX completion interrupt, and need restart platform after test */ 254 + if (mtu->test_mode_nr == TEST_PACKET_MODE) 255 + ep0_load_test_packet(mtu); 256 + 257 + mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr); 258 + 259 + mtu->ep0_state = MU3D_EP0_STATE_SETUP; 260 + 261 + out: 262 + return handled; 263 + } 264 + 265 + static int ep0_handle_feature_dev(struct mtu3 *mtu, 266 + struct usb_ctrlrequest *setup, bool set) 267 + { 268 + int handled = -EINVAL; 269 + 270 + switch (le16_to_cpu(setup->wValue)) { 271 + case USB_DEVICE_REMOTE_WAKEUP: 272 + mtu->may_wakeup = !!set; 273 + handled = 1; 274 + break; 275 + case USB_DEVICE_TEST_MODE: 276 + if (!set || (mtu->g.speed != USB_SPEED_HIGH) || 277 + (le16_to_cpu(setup->wIndex) & 0xff)) 278 + break; 279 + 280 + handled = handle_test_mode(mtu, setup); 281 + break; 282 + default: 283 + handled = -EINVAL; 284 + break; 285 + } 286 + return handled; 287 + } 288 + 289 + static int ep0_handle_feature(struct mtu3 *mtu, 290 + struct usb_ctrlrequest *setup, bool set) 291 + { 292 + struct mtu3_ep *mep; 293 + int handled = -EINVAL; 294 + int is_in; 295 + u16 value; 296 + u16 index; 297 + u8 epnum; 298 + 299 + value = le16_to_cpu(setup->wValue); 300 + index = le16_to_cpu(setup->wIndex); 301 + 302 + switch (setup->bRequestType & USB_RECIP_MASK) { 303 + case USB_RECIP_DEVICE: 304 + handled = ep0_handle_feature_dev(mtu, setup, set); 305 + break; 306 + case USB_RECIP_ENDPOINT: 307 + epnum = index & USB_ENDPOINT_NUMBER_MASK; 308 + if (epnum == 0 || epnum >= mtu->num_eps || 309 + value != USB_ENDPOINT_HALT) 310 + break; 311 + 312 + is_in = index & USB_DIR_IN; 313 + mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum; 314 + if (!mep->desc) 315 + break; 316 + 317 + handled = 1; 318 + /* ignore request if endpoint is wedged */ 319 + if (mep->wedged) 320 + break; 321 + 322 + mtu3_ep_stall_set(mep, set); 323 + break; 324 + default: 325 + /* class, vendor, etc ... delegate */ 326 + handled = 0; 327 + break; 328 + } 329 + return handled; 330 + } 331 + 332 + /* 333 + * handle all control requests can be handled 334 + * returns: 335 + * negative errno - error happened 336 + * zero - need delegate SETUP to gadget driver 337 + * positive - already handled 338 + */ 339 + static int handle_standard_request(struct mtu3 *mtu, 340 + struct usb_ctrlrequest *setup) 341 + { 342 + void __iomem *mbase = mtu->mac_base; 343 + enum usb_device_state state = mtu->g.state; 344 + int handled = -EINVAL; 345 + u32 dev_conf; 346 + u16 value; 347 + 348 + value = le16_to_cpu(setup->wValue); 349 + 350 + /* the gadget driver handles everything except what we must handle */ 351 + switch (setup->bRequest) { 352 + case USB_REQ_SET_ADDRESS: 353 + /* change it after the status stage */ 354 + mtu->address = (u8) (value & 0x7f); 355 + dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address); 356 + 357 + dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF); 358 + dev_conf &= ~DEV_ADDR_MSK; 359 + dev_conf |= DEV_ADDR(mtu->address); 360 + mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf); 361 + 362 + if (mtu->address) 363 + usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS); 364 + else 365 + usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT); 366 + 367 + handled = 1; 368 + break; 369 + case USB_REQ_SET_CONFIGURATION: 370 + if (state == USB_STATE_ADDRESS) { 371 + usb_gadget_set_state(&mtu->g, 372 + USB_STATE_CONFIGURED); 373 + } else if (state == USB_STATE_CONFIGURED) { 374 + /* 375 + * USB2 spec sec 9.4.7, if wValue is 0 then dev 376 + * is moved to addressed state 377 + */ 378 + if (!value) 379 + usb_gadget_set_state(&mtu->g, 380 + USB_STATE_ADDRESS); 381 + } 382 + handled = 0; 383 + break; 384 + case USB_REQ_CLEAR_FEATURE: 385 + handled = ep0_handle_feature(mtu, setup, 0); 386 + break; 387 + case USB_REQ_SET_FEATURE: 388 + handled = ep0_handle_feature(mtu, setup, 1); 389 + break; 390 + case USB_REQ_GET_STATUS: 391 + handled = ep0_get_status(mtu, setup); 392 + break; 393 + case USB_REQ_SET_ISOCH_DELAY: 394 + handled = 1; 395 + break; 396 + default: 397 + /* delegate SET_CONFIGURATION, etc */ 398 + handled = 0; 399 + } 400 + 401 + return handled; 402 + } 403 + 404 + /* receive an data packet (OUT) */ 405 + static void ep0_rx_state(struct mtu3 *mtu) 406 + { 407 + struct mtu3_request *mreq; 408 + struct usb_request *req; 409 + void __iomem *mbase = mtu->mac_base; 410 + u32 maxp; 411 + u32 csr; 412 + u16 count = 0; 413 + 414 + dev_dbg(mtu->dev, "%s\n", __func__); 415 + 416 + csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS; 417 + mreq = next_ep0_request(mtu); 418 + req = &mreq->request; 419 + 420 + /* read packet and ack; or stall because of gadget driver bug */ 421 + if (req) { 422 + void *buf = req->buf + req->actual; 423 + unsigned int len = req->length - req->actual; 424 + 425 + /* read the buffer */ 426 + count = mtu3_readl(mbase, U3D_RXCOUNT0); 427 + if (count > len) { 428 + req->status = -EOVERFLOW; 429 + count = len; 430 + } 431 + ep0_read_fifo(mtu->ep0, buf, count); 432 + req->actual += count; 433 + csr |= EP0_RXPKTRDY; 434 + 435 + maxp = mtu->g.ep0->maxpacket; 436 + if (count < maxp || req->actual == req->length) { 437 + mtu->ep0_state = MU3D_EP0_STATE_SETUP; 438 + dev_dbg(mtu->dev, "ep0 state: %s\n", 439 + decode_ep0_state(mtu)); 440 + 441 + csr |= EP0_DATAEND; 442 + } else { 443 + req = NULL; 444 + } 445 + } else { 446 + csr |= EP0_RXPKTRDY | EP0_SENDSTALL; 447 + dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__); 448 + } 449 + 450 + mtu3_writel(mbase, U3D_EP0CSR, csr); 451 + 452 + /* give back the request if have received all data */ 453 + if (req) 454 + ep0_req_giveback(mtu, req); 455 + 456 + } 457 + 458 + /* transmitting to the host (IN) */ 459 + static void ep0_tx_state(struct mtu3 *mtu) 460 + { 461 + struct mtu3_request *mreq = next_ep0_request(mtu); 462 + struct usb_request *req; 463 + u32 csr; 464 + u8 *src; 465 + u8 count; 466 + u32 maxp; 467 + 468 + dev_dbg(mtu->dev, "%s\n", __func__); 469 + 470 + if (!mreq) 471 + return; 472 + 473 + maxp = mtu->g.ep0->maxpacket; 474 + req = &mreq->request; 475 + 476 + /* load the data */ 477 + src = (u8 *)req->buf + req->actual; 478 + count = min(maxp, req->length - req->actual); 479 + if (count) 480 + ep0_write_fifo(mtu->ep0, src, count); 481 + 482 + dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n", 483 + __func__, req->actual, req->length, count, maxp, req->zero); 484 + 485 + req->actual += count; 486 + 487 + if ((count < maxp) 488 + || ((req->actual == req->length) && !req->zero)) 489 + mtu->ep0_state = MU3D_EP0_STATE_TX_END; 490 + 491 + /* send it out, triggering a "txpktrdy cleared" irq */ 492 + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS; 493 + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY); 494 + 495 + dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__, 496 + mtu3_readl(mtu->mac_base, U3D_EP0CSR)); 497 + } 498 + 499 + static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup) 500 + { 501 + struct mtu3_request *mreq; 502 + u32 count; 503 + u32 csr; 504 + 505 + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS; 506 + count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0); 507 + 508 + ep0_read_fifo(mtu->ep0, (u8 *)setup, count); 509 + 510 + dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n", 511 + setup->bRequestType, setup->bRequest, 512 + le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex), 513 + le16_to_cpu(setup->wLength)); 514 + 515 + /* clean up any leftover transfers */ 516 + mreq = next_ep0_request(mtu); 517 + if (mreq) 518 + ep0_req_giveback(mtu, &mreq->request); 519 + 520 + if (le16_to_cpu(setup->wLength) == 0) { 521 + ; /* no data stage, nothing to do */ 522 + } else if (setup->bRequestType & USB_DIR_IN) { 523 + mtu3_writel(mtu->mac_base, U3D_EP0CSR, 524 + csr | EP0_SETUPPKTRDY | EP0_DPHTX); 525 + mtu->ep0_state = MU3D_EP0_STATE_TX; 526 + } else { 527 + mtu3_writel(mtu->mac_base, U3D_EP0CSR, 528 + (csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX)); 529 + mtu->ep0_state = MU3D_EP0_STATE_RX; 530 + } 531 + } 532 + 533 + static int ep0_handle_setup(struct mtu3 *mtu) 534 + __releases(mtu->lock) 535 + __acquires(mtu->lock) 536 + { 537 + struct usb_ctrlrequest setup; 538 + struct mtu3_request *mreq; 539 + void __iomem *mbase = mtu->mac_base; 540 + int handled = 0; 541 + 542 + ep0_read_setup(mtu, &setup); 543 + 544 + if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) 545 + handled = handle_standard_request(mtu, &setup); 546 + 547 + dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n", 548 + handled, decode_ep0_state(mtu)); 549 + 550 + if (handled < 0) 551 + goto stall; 552 + else if (handled > 0) 553 + goto finish; 554 + 555 + handled = forward_to_driver(mtu, &setup); 556 + if (handled < 0) { 557 + stall: 558 + dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled); 559 + 560 + ep0_stall_set(mtu->ep0, true, 561 + le16_to_cpu(setup.wLength) ? 0 : EP0_SETUPPKTRDY); 562 + 563 + return 0; 564 + } 565 + 566 + finish: 567 + if (mtu->test_mode) { 568 + ; /* nothing to do */ 569 + } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */ 570 + 571 + mtu3_writel(mbase, U3D_EP0CSR, 572 + (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS) 573 + | EP0_SETUPPKTRDY | EP0_DATAEND); 574 + 575 + /* complete zlp request directly */ 576 + mreq = next_ep0_request(mtu); 577 + if (mreq && !mreq->request.length) 578 + ep0_req_giveback(mtu, &mreq->request); 579 + } 580 + 581 + return 0; 582 + } 583 + 584 + irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu) 585 + { 586 + void __iomem *mbase = mtu->mac_base; 587 + struct mtu3_request *mreq; 588 + u32 int_status; 589 + irqreturn_t ret = IRQ_NONE; 590 + u32 csr; 591 + u32 len; 592 + 593 + int_status = mtu3_readl(mbase, U3D_EPISR); 594 + int_status &= mtu3_readl(mbase, U3D_EPIER); 595 + mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */ 596 + 597 + /* only handle ep0's */ 598 + if (!(int_status & EP0ISR)) 599 + return IRQ_NONE; 600 + 601 + csr = mtu3_readl(mbase, U3D_EP0CSR); 602 + 603 + dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr); 604 + 605 + /* we sent a stall.. need to clear it now.. */ 606 + if (csr & EP0_SENTSTALL) { 607 + ep0_stall_set(mtu->ep0, false, 0); 608 + csr = mtu3_readl(mbase, U3D_EP0CSR); 609 + ret = IRQ_HANDLED; 610 + } 611 + dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu)); 612 + 613 + switch (mtu->ep0_state) { 614 + case MU3D_EP0_STATE_TX: 615 + /* irq on clearing txpktrdy */ 616 + if ((csr & EP0_FIFOFULL) == 0) { 617 + ep0_tx_state(mtu); 618 + ret = IRQ_HANDLED; 619 + } 620 + break; 621 + case MU3D_EP0_STATE_RX: 622 + /* irq on set rxpktrdy */ 623 + if (csr & EP0_RXPKTRDY) { 624 + ep0_rx_state(mtu); 625 + ret = IRQ_HANDLED; 626 + } 627 + break; 628 + case MU3D_EP0_STATE_TX_END: 629 + mtu3_writel(mbase, U3D_EP0CSR, 630 + (csr & EP0_W1C_BITS) | EP0_DATAEND); 631 + 632 + mreq = next_ep0_request(mtu); 633 + if (mreq) 634 + ep0_req_giveback(mtu, &mreq->request); 635 + 636 + mtu->ep0_state = MU3D_EP0_STATE_SETUP; 637 + ret = IRQ_HANDLED; 638 + dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu)); 639 + break; 640 + case MU3D_EP0_STATE_SETUP: 641 + if (!(csr & EP0_SETUPPKTRDY)) 642 + break; 643 + 644 + len = mtu3_readl(mbase, U3D_RXCOUNT0); 645 + if (len != 8) { 646 + dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len); 647 + break; 648 + } 649 + 650 + ep0_handle_setup(mtu); 651 + ret = IRQ_HANDLED; 652 + break; 653 + default: 654 + /* can't happen */ 655 + ep0_stall_set(mtu->ep0, true, 0); 656 + WARN_ON(1); 657 + break; 658 + } 659 + 660 + return ret; 661 + } 662 + 663 + 664 + static int mtu3_ep0_enable(struct usb_ep *ep, 665 + const struct usb_endpoint_descriptor *desc) 666 + { 667 + /* always enabled */ 668 + return -EINVAL; 669 + } 670 + 671 + static int mtu3_ep0_disable(struct usb_ep *ep) 672 + { 673 + /* always enabled */ 674 + return -EINVAL; 675 + } 676 + 677 + static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq) 678 + { 679 + struct mtu3 *mtu = mep->mtu; 680 + 681 + mreq->mtu = mtu; 682 + mreq->request.actual = 0; 683 + mreq->request.status = -EINPROGRESS; 684 + 685 + dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__, 686 + mep->name, decode_ep0_state(mtu), mreq->request.length); 687 + 688 + if (!list_empty(&mep->req_list)) 689 + return -EBUSY; 690 + 691 + switch (mtu->ep0_state) { 692 + case MU3D_EP0_STATE_SETUP: 693 + case MU3D_EP0_STATE_RX: /* control-OUT data */ 694 + case MU3D_EP0_STATE_TX: /* control-IN data */ 695 + break; 696 + default: 697 + dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__, 698 + decode_ep0_state(mtu)); 699 + return -EINVAL; 700 + } 701 + 702 + list_add_tail(&mreq->list, &mep->req_list); 703 + 704 + /* sequence #1, IN ... start writing the data */ 705 + if (mtu->ep0_state == MU3D_EP0_STATE_TX) 706 + ep0_tx_state(mtu); 707 + 708 + return 0; 709 + } 710 + 711 + static int mtu3_ep0_queue(struct usb_ep *ep, 712 + struct usb_request *req, gfp_t gfp) 713 + { 714 + struct mtu3_ep *mep; 715 + struct mtu3_request *mreq; 716 + struct mtu3 *mtu; 717 + unsigned long flags; 718 + int ret = 0; 719 + 720 + if (!ep || !req) 721 + return -EINVAL; 722 + 723 + mep = to_mtu3_ep(ep); 724 + mtu = mep->mtu; 725 + mreq = to_mtu3_request(req); 726 + 727 + spin_lock_irqsave(&mtu->lock, flags); 728 + ret = ep0_queue(mep, mreq); 729 + spin_unlock_irqrestore(&mtu->lock, flags); 730 + return ret; 731 + } 732 + 733 + static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) 734 + { 735 + /* we just won't support this */ 736 + return -EINVAL; 737 + } 738 + 739 + static int mtu3_ep0_halt(struct usb_ep *ep, int value) 740 + { 741 + struct mtu3_ep *mep; 742 + struct mtu3 *mtu; 743 + unsigned long flags; 744 + int ret = 0; 745 + 746 + if (!ep || !value) 747 + return -EINVAL; 748 + 749 + mep = to_mtu3_ep(ep); 750 + mtu = mep->mtu; 751 + 752 + dev_dbg(mtu->dev, "%s\n", __func__); 753 + 754 + spin_lock_irqsave(&mtu->lock, flags); 755 + 756 + if (!list_empty(&mep->req_list)) { 757 + ret = -EBUSY; 758 + goto cleanup; 759 + } 760 + 761 + switch (mtu->ep0_state) { 762 + /* 763 + * stalls are usually issued after parsing SETUP packet, either 764 + * directly in irq context from setup() or else later. 765 + */ 766 + case MU3D_EP0_STATE_TX: 767 + case MU3D_EP0_STATE_TX_END: 768 + case MU3D_EP0_STATE_RX: 769 + case MU3D_EP0_STATE_SETUP: 770 + ep0_stall_set(mtu->ep0, true, 0); 771 + break; 772 + default: 773 + dev_dbg(mtu->dev, "ep0 can't halt in state %s\n", 774 + decode_ep0_state(mtu)); 775 + ret = -EINVAL; 776 + } 777 + 778 + cleanup: 779 + spin_unlock_irqrestore(&mtu->lock, flags); 780 + return ret; 781 + } 782 + 783 + const struct usb_ep_ops mtu3_ep0_ops = { 784 + .enable = mtu3_ep0_enable, 785 + .disable = mtu3_ep0_disable, 786 + .alloc_request = mtu3_alloc_request, 787 + .free_request = mtu3_free_request, 788 + .queue = mtu3_ep0_queue, 789 + .dequeue = mtu3_ep0_dequeue, 790 + .set_halt = mtu3_ep0_halt, 791 + };
+440
drivers/usb/mtu3/mtu3_hw_regs.h
··· 1 + /* 2 + * mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions 3 + * 4 + * Copyright (C) 2016 MediaTek Inc. 5 + * 6 + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 7 + * 8 + * This software is licensed under the terms of the GNU General Public 9 + * License version 2, as published by the Free Software Foundation, and 10 + * may be copied, distributed, and modified under those terms. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + */ 18 + 19 + #ifndef _SSUSB_HW_REGS_H_ 20 + #define _SSUSB_HW_REGS_H_ 21 + 22 + /* segment offset of MAC register */ 23 + #define SSUSB_DEV_BASE 0x0000 24 + #define SSUSB_EPCTL_CSR_BASE 0x0800 25 + #define SSUSB_USB3_MAC_CSR_BASE 0x1400 26 + #define SSUSB_USB3_SYS_CSR_BASE 0x1400 27 + #define SSUSB_USB2_CSR_BASE 0x2400 28 + 29 + /* IPPC register in Infra */ 30 + #define SSUSB_SIFSLV_IPPC_BASE 0x0000 31 + 32 + /* --------------- SSUSB_DEV REGISTER DEFINITION --------------- */ 33 + 34 + #define U3D_LV1ISR (SSUSB_DEV_BASE + 0x0000) 35 + #define U3D_LV1IER (SSUSB_DEV_BASE + 0x0004) 36 + #define U3D_LV1IESR (SSUSB_DEV_BASE + 0x0008) 37 + #define U3D_LV1IECR (SSUSB_DEV_BASE + 0x000C) 38 + 39 + #define U3D_EPISR (SSUSB_DEV_BASE + 0x0080) 40 + #define U3D_EPIER (SSUSB_DEV_BASE + 0x0084) 41 + #define U3D_EPIESR (SSUSB_DEV_BASE + 0x0088) 42 + #define U3D_EPIECR (SSUSB_DEV_BASE + 0x008C) 43 + 44 + #define U3D_EP0CSR (SSUSB_DEV_BASE + 0x0100) 45 + #define U3D_RXCOUNT0 (SSUSB_DEV_BASE + 0x0108) 46 + #define U3D_RESERVED (SSUSB_DEV_BASE + 0x010C) 47 + #define U3D_TX1CSR0 (SSUSB_DEV_BASE + 0x0110) 48 + #define U3D_TX1CSR1 (SSUSB_DEV_BASE + 0x0114) 49 + #define U3D_TX1CSR2 (SSUSB_DEV_BASE + 0x0118) 50 + 51 + #define U3D_RX1CSR0 (SSUSB_DEV_BASE + 0x0210) 52 + #define U3D_RX1CSR1 (SSUSB_DEV_BASE + 0x0214) 53 + #define U3D_RX1CSR2 (SSUSB_DEV_BASE + 0x0218) 54 + 55 + #define U3D_FIFO0 (SSUSB_DEV_BASE + 0x0300) 56 + 57 + #define U3D_QCR0 (SSUSB_DEV_BASE + 0x0400) 58 + #define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404) 59 + #define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408) 60 + #define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C) 61 + 62 + #define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510) 63 + #define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514) 64 + #define U3D_TXQCPR1 (SSUSB_DEV_BASE + 0x0518) 65 + 66 + #define U3D_RXQCSR1 (SSUSB_DEV_BASE + 0x0610) 67 + #define U3D_RXQSAR1 (SSUSB_DEV_BASE + 0x0614) 68 + #define U3D_RXQCPR1 (SSUSB_DEV_BASE + 0x0618) 69 + #define U3D_RXQLDPR1 (SSUSB_DEV_BASE + 0x061C) 70 + 71 + #define U3D_QISAR0 (SSUSB_DEV_BASE + 0x0700) 72 + #define U3D_QIER0 (SSUSB_DEV_BASE + 0x0704) 73 + #define U3D_QIESR0 (SSUSB_DEV_BASE + 0x0708) 74 + #define U3D_QIECR0 (SSUSB_DEV_BASE + 0x070C) 75 + #define U3D_QISAR1 (SSUSB_DEV_BASE + 0x0710) 76 + #define U3D_QIER1 (SSUSB_DEV_BASE + 0x0714) 77 + #define U3D_QIESR1 (SSUSB_DEV_BASE + 0x0718) 78 + #define U3D_QIECR1 (SSUSB_DEV_BASE + 0x071C) 79 + 80 + #define U3D_TQERRIR0 (SSUSB_DEV_BASE + 0x0780) 81 + #define U3D_TQERRIER0 (SSUSB_DEV_BASE + 0x0784) 82 + #define U3D_TQERRIESR0 (SSUSB_DEV_BASE + 0x0788) 83 + #define U3D_TQERRIECR0 (SSUSB_DEV_BASE + 0x078C) 84 + #define U3D_RQERRIR0 (SSUSB_DEV_BASE + 0x07C0) 85 + #define U3D_RQERRIER0 (SSUSB_DEV_BASE + 0x07C4) 86 + #define U3D_RQERRIESR0 (SSUSB_DEV_BASE + 0x07C8) 87 + #define U3D_RQERRIECR0 (SSUSB_DEV_BASE + 0x07CC) 88 + #define U3D_RQERRIR1 (SSUSB_DEV_BASE + 0x07D0) 89 + #define U3D_RQERRIER1 (SSUSB_DEV_BASE + 0x07D4) 90 + #define U3D_RQERRIESR1 (SSUSB_DEV_BASE + 0x07D8) 91 + #define U3D_RQERRIECR1 (SSUSB_DEV_BASE + 0x07DC) 92 + 93 + #define U3D_CAP_EP0FFSZ (SSUSB_DEV_BASE + 0x0C04) 94 + #define U3D_CAP_EPNTXFFSZ (SSUSB_DEV_BASE + 0x0C08) 95 + #define U3D_CAP_EPNRXFFSZ (SSUSB_DEV_BASE + 0x0C0C) 96 + #define U3D_CAP_EPINFO (SSUSB_DEV_BASE + 0x0C10) 97 + #define U3D_MISC_CTRL (SSUSB_DEV_BASE + 0x0C84) 98 + 99 + /*---------------- SSUSB_DEV FIELD DEFINITION ---------------*/ 100 + 101 + /* U3D_LV1ISR */ 102 + #define EP_CTRL_INTR BIT(5) 103 + #define MAC2_INTR BIT(4) 104 + #define DMA_INTR BIT(3) 105 + #define MAC3_INTR BIT(2) 106 + #define QMU_INTR BIT(1) 107 + #define BMU_INTR BIT(0) 108 + 109 + /* U3D_LV1IECR */ 110 + #define LV1IECR_MSK GENMASK(31, 0) 111 + 112 + /* U3D_EPISR */ 113 + #define EPRISR(x) (BIT(16) << (x)) 114 + #define EPTISR(x) (BIT(0) << (x)) 115 + #define EP0ISR BIT(0) 116 + 117 + /* U3D_EP0CSR */ 118 + #define EP0_SENDSTALL BIT(25) 119 + #define EP0_FIFOFULL BIT(23) 120 + #define EP0_SENTSTALL BIT(22) 121 + #define EP0_DPHTX BIT(20) 122 + #define EP0_DATAEND BIT(19) 123 + #define EP0_TXPKTRDY BIT(18) 124 + #define EP0_SETUPPKTRDY BIT(17) 125 + #define EP0_RXPKTRDY BIT(16) 126 + #define EP0_MAXPKTSZ_MSK GENMASK(9, 0) 127 + #define EP0_MAXPKTSZ(x) ((x) & EP0_MAXPKTSZ_MSK) 128 + #define EP0_W1C_BITS (~(EP0_RXPKTRDY | EP0_SETUPPKTRDY | EP0_SENTSTALL)) 129 + 130 + /* U3D_TX1CSR0 */ 131 + #define TX_DMAREQEN BIT(29) 132 + #define TX_FIFOFULL BIT(25) 133 + #define TX_FIFOEMPTY BIT(24) 134 + #define TX_SENTSTALL BIT(22) 135 + #define TX_SENDSTALL BIT(21) 136 + #define TX_TXPKTRDY BIT(16) 137 + #define TX_TXMAXPKTSZ_MSK GENMASK(10, 0) 138 + #define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK) 139 + #define TX_W1C_BITS (~(TX_SENTSTALL)) 140 + 141 + /* U3D_TX1CSR1 */ 142 + #define TX_MULT(x) (((x) & 0x3) << 22) 143 + #define TX_MAX_PKT(x) (((x) & 0x3f) << 16) 144 + #define TX_SLOT(x) (((x) & 0x3f) << 8) 145 + #define TX_TYPE(x) (((x) & 0x3) << 4) 146 + #define TX_SS_BURST(x) (((x) & 0xf) << 0) 147 + 148 + /* for TX_TYPE & RX_TYPE */ 149 + #define TYPE_BULK (0x0) 150 + #define TYPE_INT (0x1) 151 + #define TYPE_ISO (0x2) 152 + #define TYPE_MASK (0x3) 153 + 154 + /* U3D_TX1CSR2 */ 155 + #define TX_BINTERVAL(x) (((x) & 0xff) << 24) 156 + #define TX_FIFOSEGSIZE(x) (((x) & 0xf) << 16) 157 + #define TX_FIFOADDR(x) (((x) & 0x1fff) << 0) 158 + 159 + /* U3D_RX1CSR0 */ 160 + #define RX_DMAREQEN BIT(29) 161 + #define RX_SENTSTALL BIT(22) 162 + #define RX_SENDSTALL BIT(21) 163 + #define RX_RXPKTRDY BIT(16) 164 + #define RX_RXMAXPKTSZ_MSK GENMASK(10, 0) 165 + #define RX_RXMAXPKTSZ(x) ((x) & RX_RXMAXPKTSZ_MSK) 166 + #define RX_W1C_BITS (~(RX_SENTSTALL | RX_RXPKTRDY)) 167 + 168 + /* U3D_RX1CSR1 */ 169 + #define RX_MULT(x) (((x) & 0x3) << 22) 170 + #define RX_MAX_PKT(x) (((x) & 0x3f) << 16) 171 + #define RX_SLOT(x) (((x) & 0x3f) << 8) 172 + #define RX_TYPE(x) (((x) & 0x3) << 4) 173 + #define RX_SS_BURST(x) (((x) & 0xf) << 0) 174 + 175 + /* U3D_RX1CSR2 */ 176 + #define RX_BINTERVAL(x) (((x) & 0xff) << 24) 177 + #define RX_FIFOSEGSIZE(x) (((x) & 0xf) << 16) 178 + #define RX_FIFOADDR(x) (((x) & 0x1fff) << 0) 179 + 180 + /* U3D_QCR0 */ 181 + #define QMU_RX_CS_EN(x) (BIT(16) << (x)) 182 + #define QMU_TX_CS_EN(x) (BIT(0) << (x)) 183 + #define QMU_CS16B_EN BIT(0) 184 + 185 + /* U3D_QCR1 */ 186 + #define QMU_TX_ZLP(x) (BIT(0) << (x)) 187 + 188 + /* U3D_QCR3 */ 189 + #define QMU_RX_COZ(x) (BIT(16) << (x)) 190 + #define QMU_RX_ZLP(x) (BIT(0) << (x)) 191 + 192 + /* U3D_TXQCSR1 */ 193 + /* U3D_RXQCSR1 */ 194 + #define QMU_Q_ACTIVE BIT(15) 195 + #define QMU_Q_STOP BIT(2) 196 + #define QMU_Q_RESUME BIT(1) 197 + #define QMU_Q_START BIT(0) 198 + 199 + /* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */ 200 + #define QMU_RX_DONE_INT(x) (BIT(16) << (x)) 201 + #define QMU_TX_DONE_INT(x) (BIT(0) << (x)) 202 + 203 + /* U3D_QISAR1, U3D_QIER1, U3D_QIESR1, U3D_QIECR1 */ 204 + #define RXQ_ZLPERR_INT BIT(20) 205 + #define RXQ_LENERR_INT BIT(18) 206 + #define RXQ_CSERR_INT BIT(17) 207 + #define RXQ_EMPTY_INT BIT(16) 208 + #define TXQ_LENERR_INT BIT(2) 209 + #define TXQ_CSERR_INT BIT(1) 210 + #define TXQ_EMPTY_INT BIT(0) 211 + 212 + /* U3D_TQERRIR0, U3D_TQERRIER0, U3D_TQERRIESR0, U3D_TQERRIECR0 */ 213 + #define QMU_TX_LEN_ERR(x) (BIT(16) << (x)) 214 + #define QMU_TX_CS_ERR(x) (BIT(0) << (x)) 215 + 216 + /* U3D_RQERRIR0, U3D_RQERRIER0, U3D_RQERRIESR0, U3D_RQERRIECR0 */ 217 + #define QMU_RX_LEN_ERR(x) (BIT(16) << (x)) 218 + #define QMU_RX_CS_ERR(x) (BIT(0) << (x)) 219 + 220 + /* U3D_RQERRIR1, U3D_RQERRIER1, U3D_RQERRIESR1, U3D_RQERRIECR1 */ 221 + #define QMU_RX_ZLP_ERR(n) (BIT(16) << (n)) 222 + 223 + /* U3D_CAP_EPINFO */ 224 + #define CAP_RX_EP_NUM(x) (((x) >> 8) & 0x1f) 225 + #define CAP_TX_EP_NUM(x) ((x) & 0x1f) 226 + 227 + /* U3D_MISC_CTRL */ 228 + #define VBUS_ON BIT(1) 229 + #define VBUS_FRC_EN BIT(0) 230 + 231 + 232 + /*---------------- SSUSB_EPCTL_CSR REGISTER DEFINITION ----------------*/ 233 + 234 + #define U3D_DEVICE_CONF (SSUSB_EPCTL_CSR_BASE + 0x0000) 235 + #define U3D_EP_RST (SSUSB_EPCTL_CSR_BASE + 0x0004) 236 + 237 + #define U3D_DEV_LINK_INTR_ENABLE (SSUSB_EPCTL_CSR_BASE + 0x0050) 238 + #define U3D_DEV_LINK_INTR (SSUSB_EPCTL_CSR_BASE + 0x0054) 239 + 240 + /*---------------- SSUSB_EPCTL_CSR FIELD DEFINITION ----------------*/ 241 + 242 + /* U3D_DEVICE_CONF */ 243 + #define DEV_ADDR_MSK GENMASK(30, 24) 244 + #define DEV_ADDR(x) ((0x7f & (x)) << 24) 245 + #define HW_USB2_3_SEL BIT(18) 246 + #define SW_USB2_3_SEL_EN BIT(17) 247 + #define SW_USB2_3_SEL BIT(16) 248 + #define SSUSB_DEV_SPEED(x) ((x) & 0x7) 249 + 250 + /* U3D_EP_RST */ 251 + #define EP1_IN_RST BIT(17) 252 + #define EP1_OUT_RST BIT(1) 253 + #define EP_RST(is_in, epnum) (((is_in) ? BIT(16) : BIT(0)) << (epnum)) 254 + #define EP0_RST BIT(0) 255 + 256 + /* U3D_DEV_LINK_INTR_ENABLE */ 257 + /* U3D_DEV_LINK_INTR */ 258 + #define SSUSB_DEV_SPEED_CHG_INTR BIT(0) 259 + 260 + 261 + /*---------------- SSUSB_USB3_MAC_CSR REGISTER DEFINITION ----------------*/ 262 + 263 + #define U3D_USB3_CONFIG (SSUSB_USB3_MAC_CSR_BASE + 0x001C) 264 + 265 + /*---------------- SSUSB_USB3_MAC_CSR FIELD DEFINITION ----------------*/ 266 + 267 + /* U3D_USB3_CONFIG */ 268 + #define USB3_EN BIT(0) 269 + 270 + /*---------------- SSUSB_USB3_SYS_CSR REGISTER DEFINITION ----------------*/ 271 + 272 + #define U3D_LINK_UX_INACT_TIMER (SSUSB_USB3_SYS_CSR_BASE + 0x020C) 273 + #define U3D_LINK_POWER_CONTROL (SSUSB_USB3_SYS_CSR_BASE + 0x0210) 274 + #define U3D_LINK_ERR_COUNT (SSUSB_USB3_SYS_CSR_BASE + 0x0214) 275 + 276 + /*---------------- SSUSB_USB3_SYS_CSR FIELD DEFINITION ----------------*/ 277 + 278 + /* U3D_LINK_UX_INACT_TIMER */ 279 + #define DEV_U2_INACT_TIMEOUT_MSK GENMASK(23, 16) 280 + #define DEV_U2_INACT_TIMEOUT_VALUE(x) (((x) & 0xff) << 16) 281 + #define U2_INACT_TIMEOUT_MSK GENMASK(15, 8) 282 + #define U1_INACT_TIMEOUT_MSK GENMASK(7, 0) 283 + #define U1_INACT_TIMEOUT_VALUE(x) ((x) & 0xff) 284 + 285 + /* U3D_LINK_POWER_CONTROL */ 286 + #define SW_U2_ACCEPT_ENABLE BIT(9) 287 + #define SW_U1_ACCEPT_ENABLE BIT(8) 288 + #define UX_EXIT BIT(5) 289 + #define LGO_U3 BIT(4) 290 + #define LGO_U2 BIT(3) 291 + #define LGO_U1 BIT(2) 292 + #define SW_U2_REQUEST_ENABLE BIT(1) 293 + #define SW_U1_REQUEST_ENABLE BIT(0) 294 + 295 + /* U3D_LINK_ERR_COUNT */ 296 + #define CLR_LINK_ERR_CNT BIT(16) 297 + #define LINK_ERROR_COUNT GENMASK(15, 0) 298 + 299 + /*---------------- SSUSB_USB2_CSR REGISTER DEFINITION ----------------*/ 300 + 301 + #define U3D_POWER_MANAGEMENT (SSUSB_USB2_CSR_BASE + 0x0004) 302 + #define U3D_DEVICE_CONTROL (SSUSB_USB2_CSR_BASE + 0x000C) 303 + #define U3D_USB2_TEST_MODE (SSUSB_USB2_CSR_BASE + 0x0014) 304 + #define U3D_COMMON_USB_INTR_ENABLE (SSUSB_USB2_CSR_BASE + 0x0018) 305 + #define U3D_COMMON_USB_INTR (SSUSB_USB2_CSR_BASE + 0x001C) 306 + #define U3D_LINK_RESET_INFO (SSUSB_USB2_CSR_BASE + 0x0024) 307 + #define U3D_USB20_FRAME_NUM (SSUSB_USB2_CSR_BASE + 0x003C) 308 + #define U3D_USB20_LPM_PARAMETER (SSUSB_USB2_CSR_BASE + 0x0044) 309 + #define U3D_USB20_MISC_CONTROL (SSUSB_USB2_CSR_BASE + 0x004C) 310 + 311 + /*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/ 312 + 313 + /* U3D_POWER_MANAGEMENT */ 314 + #define LPM_BESL_STALL BIT(14) 315 + #define LPM_BESLD_STALL BIT(13) 316 + #define LPM_RWP BIT(11) 317 + #define LPM_HRWE BIT(10) 318 + #define LPM_MODE(x) (((x) & 0x3) << 8) 319 + #define ISO_UPDATE BIT(7) 320 + #define SOFT_CONN BIT(6) 321 + #define HS_ENABLE BIT(5) 322 + #define RESUME BIT(2) 323 + #define SUSPENDM_ENABLE BIT(0) 324 + 325 + /* U3D_DEVICE_CONTROL */ 326 + #define DC_HOSTREQ BIT(1) 327 + #define DC_SESSION BIT(0) 328 + 329 + /* U3D_USB2_TEST_MODE */ 330 + #define U2U3_AUTO_SWITCH BIT(10) 331 + #define LPM_FORCE_STALL BIT(8) 332 + #define FIFO_ACCESS BIT(6) 333 + #define FORCE_FS BIT(5) 334 + #define FORCE_HS BIT(4) 335 + #define TEST_PACKET_MODE BIT(3) 336 + #define TEST_K_MODE BIT(2) 337 + #define TEST_J_MODE BIT(1) 338 + #define TEST_SE0_NAK_MODE BIT(0) 339 + 340 + /* U3D_COMMON_USB_INTR_ENABLE */ 341 + /* U3D_COMMON_USB_INTR */ 342 + #define LPM_RESUME_INTR BIT(9) 343 + #define LPM_INTR BIT(8) 344 + #define DISCONN_INTR BIT(5) 345 + #define CONN_INTR BIT(4) 346 + #define SOF_INTR BIT(3) 347 + #define RESET_INTR BIT(2) 348 + #define RESUME_INTR BIT(1) 349 + #define SUSPEND_INTR BIT(0) 350 + 351 + /* U3D_LINK_RESET_INFO */ 352 + #define WTCHRP_MSK GENMASK(19, 16) 353 + 354 + /* U3D_USB20_LPM_PARAMETER */ 355 + #define LPM_BESLCK_U3(x) (((x) & 0xf) << 12) 356 + #define LPM_BESLCK(x) (((x) & 0xf) << 8) 357 + #define LPM_BESLDCK(x) (((x) & 0xf) << 4) 358 + #define LPM_BESL GENMASK(3, 0) 359 + 360 + /* U3D_USB20_MISC_CONTROL */ 361 + #define LPM_U3_ACK_EN BIT(0) 362 + 363 + /*---------------- SSUSB_SIFSLV_IPPC REGISTER DEFINITION ----------------*/ 364 + 365 + #define U3D_SSUSB_IP_PW_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x0000) 366 + #define U3D_SSUSB_IP_PW_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x0004) 367 + #define U3D_SSUSB_IP_PW_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x0008) 368 + #define U3D_SSUSB_IP_PW_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x000C) 369 + #define U3D_SSUSB_IP_PW_STS1 (SSUSB_SIFSLV_IPPC_BASE + 0x0010) 370 + #define U3D_SSUSB_IP_PW_STS2 (SSUSB_SIFSLV_IPPC_BASE + 0x0014) 371 + #define U3D_SSUSB_OTG_STS (SSUSB_SIFSLV_IPPC_BASE + 0x0018) 372 + #define U3D_SSUSB_OTG_STS_CLR (SSUSB_SIFSLV_IPPC_BASE + 0x001C) 373 + #define U3D_SSUSB_IP_XHCI_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0024) 374 + #define U3D_SSUSB_IP_DEV_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0028) 375 + #define U3D_SSUSB_OTG_INT_EN (SSUSB_SIFSLV_IPPC_BASE + 0x002C) 376 + #define U3D_SSUSB_U3_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0030) 377 + #define U3D_SSUSB_U2_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0050) 378 + #define U3D_SSUSB_REF_CK_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x008C) 379 + #define U3D_SSUSB_DEV_RST_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x0098) 380 + #define U3D_SSUSB_HW_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A0) 381 + #define U3D_SSUSB_HW_SUB_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A4) 382 + #define U3D_SSUSB_IP_SPARE0 (SSUSB_SIFSLV_IPPC_BASE + 0x00C8) 383 + 384 + /*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/ 385 + 386 + /* U3D_SSUSB_IP_PW_CTRL0 */ 387 + #define SSUSB_IP_SW_RST BIT(0) 388 + 389 + /* U3D_SSUSB_IP_PW_CTRL1 */ 390 + #define SSUSB_IP_HOST_PDN BIT(0) 391 + 392 + /* U3D_SSUSB_IP_PW_CTRL2 */ 393 + #define SSUSB_IP_DEV_PDN BIT(0) 394 + 395 + /* U3D_SSUSB_IP_PW_CTRL3 */ 396 + #define SSUSB_IP_PCIE_PDN BIT(0) 397 + 398 + /* U3D_SSUSB_IP_PW_STS1 */ 399 + #define SSUSB_IP_SLEEP_STS BIT(30) 400 + #define SSUSB_U3_MAC_RST_B_STS BIT(16) 401 + #define SSUSB_XHCI_RST_B_STS BIT(11) 402 + #define SSUSB_SYS125_RST_B_STS BIT(10) 403 + #define SSUSB_REF_RST_B_STS BIT(8) 404 + #define SSUSB_SYSPLL_STABLE BIT(0) 405 + 406 + /* U3D_SSUSB_IP_PW_STS2 */ 407 + #define SSUSB_U2_MAC_SYS_RST_B_STS BIT(0) 408 + 409 + /* U3D_SSUSB_OTG_STS */ 410 + #define SSUSB_VBUS_VALID BIT(9) 411 + 412 + /* U3D_SSUSB_OTG_STS_CLR */ 413 + #define SSUSB_VBUS_INTR_CLR BIT(6) 414 + 415 + /* U3D_SSUSB_IP_XHCI_CAP */ 416 + #define SSUSB_IP_XHCI_U2_PORT_NUM(x) (((x) >> 8) & 0xff) 417 + #define SSUSB_IP_XHCI_U3_PORT_NUM(x) ((x) & 0xff) 418 + 419 + /* U3D_SSUSB_IP_DEV_CAP */ 420 + #define SSUSB_IP_DEV_U3_PORT_NUM(x) ((x) & 0xff) 421 + 422 + /* U3D_SSUSB_OTG_INT_EN */ 423 + #define SSUSB_VBUS_CHG_INT_A_EN BIT(7) 424 + #define SSUSB_VBUS_CHG_INT_B_EN BIT(6) 425 + 426 + /* U3D_SSUSB_U3_CTRL_0P */ 427 + #define SSUSB_U3_PORT_HOST_SEL BIT(2) 428 + #define SSUSB_U3_PORT_PDN BIT(1) 429 + #define SSUSB_U3_PORT_DIS BIT(0) 430 + 431 + /* U3D_SSUSB_U2_CTRL_0P */ 432 + #define SSUSB_U2_PORT_OTG_SEL BIT(7) 433 + #define SSUSB_U2_PORT_HOST_SEL BIT(2) 434 + #define SSUSB_U2_PORT_PDN BIT(1) 435 + #define SSUSB_U2_PORT_DIS BIT(0) 436 + 437 + /* U3D_SSUSB_DEV_RST_CTRL */ 438 + #define SSUSB_DEV_SW_RST BIT(0) 439 + 440 + #endif /* _SSUSB_HW_REGS_H_ */
+251
drivers/usb/mtu3/mtu3_plat.c
··· 1 + /* 2 + * Copyright (C) 2016 MediaTek Inc. 3 + * 4 + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #include <linux/clk.h> 18 + #include <linux/dma-mapping.h> 19 + #include <linux/iopoll.h> 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/of_address.h> 23 + #include <linux/of_irq.h> 24 + #include <linux/platform_device.h> 25 + 26 + #include "mtu3.h" 27 + 28 + /* u2-port0 should be powered on and enabled; */ 29 + int ssusb_check_clocks(struct mtu3 *mtu, u32 ex_clks) 30 + { 31 + void __iomem *ibase = mtu->ippc_base; 32 + u32 value, check_val; 33 + int ret; 34 + 35 + check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE | 36 + SSUSB_REF_RST_B_STS; 37 + 38 + ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value, 39 + (check_val == (value & check_val)), 100, 20000); 40 + if (ret) { 41 + dev_err(mtu->dev, "clks of sts1 are not stable!\n"); 42 + return ret; 43 + } 44 + 45 + ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value, 46 + (value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000); 47 + if (ret) { 48 + dev_err(mtu->dev, "mac2 clock is not stable\n"); 49 + return ret; 50 + } 51 + 52 + return 0; 53 + } 54 + 55 + static int ssusb_rscs_init(struct mtu3 *mtu) 56 + { 57 + int ret = 0; 58 + 59 + ret = regulator_enable(mtu->vusb33); 60 + if (ret) { 61 + dev_err(mtu->dev, "failed to enable vusb33\n"); 62 + goto vusb33_err; 63 + } 64 + 65 + ret = clk_prepare_enable(mtu->sys_clk); 66 + if (ret) { 67 + dev_err(mtu->dev, "failed to enable sys_clk\n"); 68 + goto clk_err; 69 + } 70 + 71 + ret = phy_init(mtu->phy); 72 + if (ret) { 73 + dev_err(mtu->dev, "failed to init phy\n"); 74 + goto phy_init_err; 75 + } 76 + 77 + ret = phy_power_on(mtu->phy); 78 + if (ret) { 79 + dev_err(mtu->dev, "failed to power on phy\n"); 80 + goto phy_err; 81 + } 82 + 83 + return 0; 84 + 85 + phy_err: 86 + phy_exit(mtu->phy); 87 + 88 + phy_init_err: 89 + clk_disable_unprepare(mtu->sys_clk); 90 + 91 + clk_err: 92 + regulator_disable(mtu->vusb33); 93 + 94 + vusb33_err: 95 + 96 + return ret; 97 + } 98 + 99 + static void ssusb_rscs_exit(struct mtu3 *mtu) 100 + { 101 + clk_disable_unprepare(mtu->sys_clk); 102 + regulator_disable(mtu->vusb33); 103 + phy_power_off(mtu->phy); 104 + phy_exit(mtu->phy); 105 + } 106 + 107 + static void ssusb_ip_sw_reset(struct mtu3 *mtu) 108 + { 109 + mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST); 110 + udelay(1); 111 + mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST); 112 + } 113 + 114 + static int get_ssusb_rscs(struct platform_device *pdev, struct mtu3 *mtu) 115 + { 116 + struct device_node *node = pdev->dev.of_node; 117 + struct device *dev = &pdev->dev; 118 + struct resource *res; 119 + 120 + mtu->phy = devm_of_phy_get_by_index(dev, node, 0); 121 + if (IS_ERR(mtu->phy)) { 122 + dev_err(dev, "failed to get phy\n"); 123 + return PTR_ERR(mtu->phy); 124 + } 125 + 126 + mtu->irq = platform_get_irq(pdev, 0); 127 + if (mtu->irq <= 0) { 128 + dev_err(dev, "fail to get irq number\n"); 129 + return -ENODEV; 130 + } 131 + dev_info(dev, "irq %d\n", mtu->irq); 132 + 133 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac"); 134 + mtu->mac_base = devm_ioremap_resource(dev, res); 135 + if (IS_ERR(mtu->mac_base)) { 136 + dev_err(dev, "error mapping memory for dev mac\n"); 137 + return PTR_ERR(mtu->mac_base); 138 + } 139 + 140 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc"); 141 + mtu->ippc_base = devm_ioremap_resource(dev, res); 142 + if (IS_ERR(mtu->ippc_base)) { 143 + dev_err(dev, "failed to map memory for ippc\n"); 144 + return PTR_ERR(mtu->ippc_base); 145 + } 146 + 147 + mtu->vusb33 = devm_regulator_get(&pdev->dev, "vusb33"); 148 + if (IS_ERR(mtu->vusb33)) { 149 + dev_err(dev, "failed to get vusb33\n"); 150 + return PTR_ERR(mtu->vusb33); 151 + } 152 + 153 + mtu->sys_clk = devm_clk_get(dev, "sys_ck"); 154 + if (IS_ERR(mtu->sys_clk)) { 155 + dev_err(dev, "failed to get sys clock\n"); 156 + return PTR_ERR(mtu->sys_clk); 157 + } 158 + 159 + return 0; 160 + } 161 + 162 + static int mtu3_probe(struct platform_device *pdev) 163 + { 164 + struct device *dev = &pdev->dev; 165 + struct mtu3 *mtu; 166 + int ret = -ENOMEM; 167 + 168 + /* all elements are set to ZERO as default value */ 169 + mtu = devm_kzalloc(dev, sizeof(struct mtu3), GFP_KERNEL); 170 + if (!mtu) 171 + return -ENOMEM; 172 + 173 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 174 + if (ret) { 175 + dev_err(dev, "No suitable DMA config available\n"); 176 + return -ENOTSUPP; 177 + } 178 + 179 + platform_set_drvdata(pdev, mtu); 180 + mtu->dev = dev; 181 + spin_lock_init(&mtu->lock); 182 + 183 + ret = get_ssusb_rscs(pdev, mtu); 184 + if (ret) 185 + return ret; 186 + 187 + /* enable power domain */ 188 + pm_runtime_enable(dev); 189 + pm_runtime_get_sync(dev); 190 + device_enable_async_suspend(dev); 191 + 192 + ret = ssusb_rscs_init(mtu); 193 + if (ret) 194 + goto comm_init_err; 195 + 196 + ssusb_ip_sw_reset(mtu); 197 + 198 + ret = ssusb_gadget_init(mtu); 199 + if (ret) { 200 + dev_err(dev, "failed to initialize gadget\n"); 201 + goto comm_exit; 202 + } 203 + 204 + return 0; 205 + 206 + comm_exit: 207 + ssusb_rscs_exit(mtu); 208 + 209 + comm_init_err: 210 + pm_runtime_put_sync(dev); 211 + pm_runtime_disable(dev); 212 + 213 + return ret; 214 + } 215 + 216 + static int mtu3_remove(struct platform_device *pdev) 217 + { 218 + struct mtu3 *mtu = platform_get_drvdata(pdev); 219 + 220 + ssusb_gadget_exit(mtu); 221 + ssusb_rscs_exit(mtu); 222 + pm_runtime_put_sync(&pdev->dev); 223 + pm_runtime_disable(&pdev->dev); 224 + 225 + return 0; 226 + } 227 + 228 + #ifdef CONFIG_OF 229 + 230 + static const struct of_device_id mtu3_of_match[] = { 231 + {.compatible = "mediatek,mt8173-mtu3",}, 232 + {}, 233 + }; 234 + 235 + MODULE_DEVICE_TABLE(of, mtu3_of_match); 236 + 237 + #endif 238 + 239 + static struct platform_driver mtu3_driver = { 240 + .probe = mtu3_probe, 241 + .remove = mtu3_remove, 242 + .driver = { 243 + .name = MTU3_DRIVER_NAME, 244 + .of_match_table = of_match_ptr(mtu3_of_match), 245 + }, 246 + }; 247 + module_platform_driver(mtu3_driver); 248 + 249 + MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>"); 250 + MODULE_LICENSE("GPL v2"); 251 + MODULE_DESCRIPTION("MediaTek USB3 DRD Controller Driver");
+573
drivers/usb/mtu3/mtu3_qmu.c
··· 1 + /* 2 + * mtu3_qmu.c - Queue Management Unit driver for device controller 3 + * 4 + * Copyright (C) 2016 MediaTek Inc. 5 + * 6 + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 7 + * 8 + * This software is licensed under the terms of the GNU General Public 9 + * License version 2, as published by the Free Software Foundation, and 10 + * may be copied, distributed, and modified under those terms. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + */ 18 + 19 + /* 20 + * Queue Management Unit (QMU) is designed to unload SW effort 21 + * to serve DMA interrupts. 22 + * By preparing General Purpose Descriptor (GPD) and Buffer Descriptor (BD), 23 + * SW links data buffers and triggers QMU to send / receive data to 24 + * host / from device at a time. 25 + * And now only GPD is supported. 26 + * 27 + * For more detailed information, please refer to QMU Programming Guide 28 + */ 29 + 30 + #include <linux/dmapool.h> 31 + #include <linux/iopoll.h> 32 + 33 + #include "mtu3.h" 34 + 35 + #define QMU_CHECKSUM_LEN 16 36 + 37 + #define GPD_FLAGS_HWO BIT(0) 38 + #define GPD_FLAGS_BDP BIT(1) 39 + #define GPD_FLAGS_BPS BIT(2) 40 + #define GPD_FLAGS_IOC BIT(7) 41 + 42 + #define GPD_EXT_FLAG_ZLP BIT(5) 43 + 44 + 45 + static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring, 46 + dma_addr_t dma_addr) 47 + { 48 + dma_addr_t dma_base = ring->dma; 49 + struct qmu_gpd *gpd_head = ring->start; 50 + u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head); 51 + 52 + if (offset >= MAX_GPD_NUM) 53 + return NULL; 54 + 55 + return gpd_head + offset; 56 + } 57 + 58 + static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring, 59 + struct qmu_gpd *gpd) 60 + { 61 + dma_addr_t dma_base = ring->dma; 62 + struct qmu_gpd *gpd_head = ring->start; 63 + u32 offset; 64 + 65 + offset = gpd - gpd_head; 66 + if (offset >= MAX_GPD_NUM) 67 + return 0; 68 + 69 + return dma_base + (offset * sizeof(*gpd)); 70 + } 71 + 72 + static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) 73 + { 74 + ring->start = gpd; 75 + ring->enqueue = gpd; 76 + ring->dequeue = gpd; 77 + ring->end = gpd + MAX_GPD_NUM - 1; 78 + } 79 + 80 + static void reset_gpd_list(struct mtu3_ep *mep) 81 + { 82 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 83 + struct qmu_gpd *gpd = ring->start; 84 + 85 + if (gpd) { 86 + gpd->flag &= ~GPD_FLAGS_HWO; 87 + gpd_ring_init(ring, gpd); 88 + } 89 + } 90 + 91 + int mtu3_gpd_ring_alloc(struct mtu3_ep *mep) 92 + { 93 + struct qmu_gpd *gpd; 94 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 95 + 96 + /* software own all gpds as default */ 97 + gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma); 98 + if (gpd == NULL) 99 + return -ENOMEM; 100 + 101 + gpd_ring_init(ring, gpd); 102 + 103 + return 0; 104 + } 105 + 106 + void mtu3_gpd_ring_free(struct mtu3_ep *mep) 107 + { 108 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 109 + 110 + dma_pool_free(mep->mtu->qmu_gpd_pool, 111 + ring->start, ring->dma); 112 + memset(ring, 0, sizeof(*ring)); 113 + } 114 + 115 + /* 116 + * calculate check sum of a gpd or bd 117 + * add "noinline" and "mb" to prevent wrong calculation 118 + */ 119 + static noinline u8 qmu_calc_checksum(u8 *data) 120 + { 121 + u8 chksum = 0; 122 + int i; 123 + 124 + data[1] = 0x0; /* set checksum to 0 */ 125 + 126 + mb(); /* ensure the gpd/bd is really up-to-date */ 127 + for (i = 0; i < QMU_CHECKSUM_LEN; i++) 128 + chksum += data[i]; 129 + 130 + /* Default: HWO=1, @flag[bit0] */ 131 + chksum += 1; 132 + 133 + return 0xFF - chksum; 134 + } 135 + 136 + void mtu3_qmu_resume(struct mtu3_ep *mep) 137 + { 138 + struct mtu3 *mtu = mep->mtu; 139 + void __iomem *mbase = mtu->mac_base; 140 + int epnum = mep->epnum; 141 + u32 offset; 142 + 143 + offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); 144 + 145 + mtu3_writel(mbase, offset, QMU_Q_RESUME); 146 + if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE)) 147 + mtu3_writel(mbase, offset, QMU_Q_RESUME); 148 + } 149 + 150 + static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring) 151 + { 152 + if (ring->enqueue < ring->end) 153 + ring->enqueue++; 154 + else 155 + ring->enqueue = ring->start; 156 + 157 + return ring->enqueue; 158 + } 159 + 160 + static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring) 161 + { 162 + if (ring->dequeue < ring->end) 163 + ring->dequeue++; 164 + else 165 + ring->dequeue = ring->start; 166 + 167 + return ring->dequeue; 168 + } 169 + 170 + /* check if a ring is emtpy */ 171 + int gpd_ring_empty(struct mtu3_gpd_ring *ring) 172 + { 173 + struct qmu_gpd *enq = ring->enqueue; 174 + struct qmu_gpd *next; 175 + 176 + if (ring->enqueue < ring->end) 177 + next = enq + 1; 178 + else 179 + next = ring->start; 180 + 181 + /* one gpd is reserved to simplify gpd preparation */ 182 + return next == ring->dequeue; 183 + } 184 + 185 + int mtu3_prepare_transfer(struct mtu3_ep *mep) 186 + { 187 + return gpd_ring_empty(&mep->gpd_ring); 188 + } 189 + 190 + static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) 191 + { 192 + struct qmu_gpd *enq; 193 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 194 + struct qmu_gpd *gpd = ring->enqueue; 195 + struct usb_request *req = &mreq->request; 196 + 197 + /* set all fields to zero as default value */ 198 + memset(gpd, 0, sizeof(*gpd)); 199 + 200 + gpd->buffer = cpu_to_le32((u32)req->dma); 201 + gpd->buf_len = cpu_to_le16(req->length); 202 + gpd->flag |= GPD_FLAGS_IOC; 203 + 204 + /* get the next GPD */ 205 + enq = advance_enq_gpd(ring); 206 + dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n", 207 + mep->epnum, gpd, enq); 208 + 209 + enq->flag &= ~GPD_FLAGS_HWO; 210 + gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); 211 + 212 + if (req->zero) 213 + gpd->ext_flag |= GPD_EXT_FLAG_ZLP; 214 + 215 + gpd->chksum = qmu_calc_checksum((u8 *)gpd); 216 + gpd->flag |= GPD_FLAGS_HWO; 217 + 218 + mreq->gpd = gpd; 219 + 220 + return 0; 221 + } 222 + 223 + static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) 224 + { 225 + struct qmu_gpd *enq; 226 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 227 + struct qmu_gpd *gpd = ring->enqueue; 228 + struct usb_request *req = &mreq->request; 229 + 230 + /* set all fields to zero as default value */ 231 + memset(gpd, 0, sizeof(*gpd)); 232 + 233 + gpd->buffer = cpu_to_le32((u32)req->dma); 234 + gpd->data_buf_len = cpu_to_le16(req->length); 235 + gpd->flag |= GPD_FLAGS_IOC; 236 + 237 + /* get the next GPD */ 238 + enq = advance_enq_gpd(ring); 239 + dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n", 240 + mep->epnum, gpd, enq); 241 + 242 + enq->flag &= ~GPD_FLAGS_HWO; 243 + gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); 244 + gpd->chksum = qmu_calc_checksum((u8 *)gpd); 245 + gpd->flag |= GPD_FLAGS_HWO; 246 + 247 + mreq->gpd = gpd; 248 + 249 + return 0; 250 + } 251 + 252 + void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) 253 + { 254 + 255 + if (mep->is_in) 256 + mtu3_prepare_tx_gpd(mep, mreq); 257 + else 258 + mtu3_prepare_rx_gpd(mep, mreq); 259 + } 260 + 261 + int mtu3_qmu_start(struct mtu3_ep *mep) 262 + { 263 + struct mtu3 *mtu = mep->mtu; 264 + void __iomem *mbase = mtu->mac_base; 265 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 266 + u8 epnum = mep->epnum; 267 + 268 + if (mep->is_in) { 269 + /* set QMU start address */ 270 + mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma); 271 + mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); 272 + mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum)); 273 + /* send zero length packet according to ZLP flag in GPD */ 274 + mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum)); 275 + mtu3_writel(mbase, U3D_TQERRIESR0, 276 + QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum)); 277 + 278 + if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) { 279 + dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum); 280 + return 0; 281 + } 282 + mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START); 283 + 284 + } else { 285 + mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma); 286 + mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN); 287 + mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum)); 288 + /* don't expect ZLP */ 289 + mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum)); 290 + /* move to next GPD when receive ZLP */ 291 + mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum)); 292 + mtu3_writel(mbase, U3D_RQERRIESR0, 293 + QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum)); 294 + mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum)); 295 + 296 + if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) { 297 + dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum); 298 + return 0; 299 + } 300 + mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START); 301 + } 302 + 303 + return 0; 304 + } 305 + 306 + /* may called in atomic context */ 307 + void mtu3_qmu_stop(struct mtu3_ep *mep) 308 + { 309 + struct mtu3 *mtu = mep->mtu; 310 + void __iomem *mbase = mtu->mac_base; 311 + int epnum = mep->epnum; 312 + u32 value = 0; 313 + u32 qcsr; 314 + int ret; 315 + 316 + qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); 317 + 318 + if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) { 319 + dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name); 320 + return; 321 + } 322 + mtu3_writel(mbase, qcsr, QMU_Q_STOP); 323 + 324 + ret = readl_poll_timeout_atomic(mbase + qcsr, value, 325 + !(value & QMU_Q_ACTIVE), 1, 1000); 326 + if (ret) { 327 + dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name); 328 + return; 329 + } 330 + 331 + dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name); 332 + } 333 + 334 + void mtu3_qmu_flush(struct mtu3_ep *mep) 335 + { 336 + 337 + dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__, 338 + ((mep->is_in) ? "TX" : "RX")); 339 + 340 + /*Stop QMU */ 341 + mtu3_qmu_stop(mep); 342 + reset_gpd_list(mep); 343 + } 344 + 345 + /* 346 + * QMU can't transfer zero length packet directly (a hardware limit 347 + * on old SoCs), so when needs to send ZLP, we intentionally trigger 348 + * a length error interrupt, and in the ISR sends a ZLP by BMU. 349 + */ 350 + static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum) 351 + { 352 + struct mtu3_ep *mep = mtu->in_eps + epnum; 353 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 354 + void __iomem *mbase = mtu->mac_base; 355 + struct qmu_gpd *gpd_current = NULL; 356 + dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum)); 357 + struct usb_request *req = NULL; 358 + struct mtu3_request *mreq; 359 + u32 txcsr = 0; 360 + int ret; 361 + 362 + mreq = next_request(mep); 363 + if (mreq && mreq->request.length == 0) 364 + req = &mreq->request; 365 + else 366 + return; 367 + 368 + gpd_current = gpd_dma_to_virt(ring, gpd_dma); 369 + 370 + if (le16_to_cpu(gpd_current->buf_len) != 0) { 371 + dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum); 372 + return; 373 + } 374 + 375 + dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq); 376 + 377 + mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); 378 + 379 + ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum), 380 + txcsr, !(txcsr & TX_FIFOFULL), 1, 1000); 381 + if (ret) { 382 + dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__); 383 + return; 384 + } 385 + mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY); 386 + 387 + /* by pass the current GDP */ 388 + gpd_current->flag |= GPD_FLAGS_BPS; 389 + gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current); 390 + gpd_current->flag |= GPD_FLAGS_HWO; 391 + 392 + /*enable DMAREQEN, switch back to QMU mode */ 393 + mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); 394 + mtu3_qmu_resume(mep); 395 + } 396 + 397 + /* 398 + * NOTE: request list maybe is already empty as following case: 399 + * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)--> 400 + * queue_tx --> process_tasklet(meanwhile, the second one is transferred, 401 + * tasklet process both of them)-->qmu_interrupt for second one. 402 + * To avoid upper case, put qmu_done_tx in ISR directly to process it. 403 + */ 404 + static void qmu_done_tx(struct mtu3 *mtu, u8 epnum) 405 + { 406 + struct mtu3_ep *mep = mtu->in_eps + epnum; 407 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 408 + void __iomem *mbase = mtu->mac_base; 409 + struct qmu_gpd *gpd = ring->dequeue; 410 + struct qmu_gpd *gpd_current = NULL; 411 + dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum)); 412 + struct usb_request *request = NULL; 413 + struct mtu3_request *mreq; 414 + 415 + /*transfer phy address got from QMU register to virtual address */ 416 + gpd_current = gpd_dma_to_virt(ring, gpd_dma); 417 + 418 + dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", 419 + __func__, epnum, gpd, gpd_current, ring->enqueue); 420 + 421 + while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { 422 + 423 + mreq = next_request(mep); 424 + 425 + if (mreq == NULL || mreq->gpd != gpd) { 426 + dev_err(mtu->dev, "no correct TX req is found\n"); 427 + break; 428 + } 429 + 430 + request = &mreq->request; 431 + request->actual = le16_to_cpu(gpd->buf_len); 432 + mtu3_req_complete(mep, request, 0); 433 + 434 + gpd = advance_deq_gpd(ring); 435 + } 436 + 437 + dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", 438 + __func__, epnum, ring->dequeue, ring->enqueue); 439 + 440 + } 441 + 442 + static void qmu_done_rx(struct mtu3 *mtu, u8 epnum) 443 + { 444 + struct mtu3_ep *mep = mtu->out_eps + epnum; 445 + struct mtu3_gpd_ring *ring = &mep->gpd_ring; 446 + void __iomem *mbase = mtu->mac_base; 447 + struct qmu_gpd *gpd = ring->dequeue; 448 + struct qmu_gpd *gpd_current = NULL; 449 + dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum)); 450 + struct usb_request *req = NULL; 451 + struct mtu3_request *mreq; 452 + 453 + gpd_current = gpd_dma_to_virt(ring, gpd_dma); 454 + 455 + dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", 456 + __func__, epnum, gpd, gpd_current, ring->enqueue); 457 + 458 + while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { 459 + 460 + mreq = next_request(mep); 461 + 462 + if (mreq == NULL || mreq->gpd != gpd) { 463 + dev_err(mtu->dev, "no correct RX req is found\n"); 464 + break; 465 + } 466 + req = &mreq->request; 467 + 468 + req->actual = le16_to_cpu(gpd->buf_len); 469 + mtu3_req_complete(mep, req, 0); 470 + 471 + gpd = advance_deq_gpd(ring); 472 + } 473 + 474 + dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", 475 + __func__, epnum, ring->dequeue, ring->enqueue); 476 + } 477 + 478 + static void qmu_done_isr(struct mtu3 *mtu, u32 done_status) 479 + { 480 + int i; 481 + 482 + for (i = 1; i < mtu->num_eps; i++) { 483 + if (done_status & QMU_RX_DONE_INT(i)) 484 + qmu_done_rx(mtu, i); 485 + if (done_status & QMU_TX_DONE_INT(i)) 486 + qmu_done_tx(mtu, i); 487 + } 488 + } 489 + 490 + static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status) 491 + { 492 + void __iomem *mbase = mtu->mac_base; 493 + u32 errval; 494 + int i; 495 + 496 + if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) { 497 + errval = mtu3_readl(mbase, U3D_RQERRIR0); 498 + for (i = 1; i < mtu->num_eps; i++) { 499 + if (errval & QMU_RX_CS_ERR(i)) 500 + dev_err(mtu->dev, "Rx %d CS error!\n", i); 501 + 502 + if (errval & QMU_RX_LEN_ERR(i)) 503 + dev_err(mtu->dev, "RX %d Length error\n", i); 504 + } 505 + mtu3_writel(mbase, U3D_RQERRIR0, errval); 506 + } 507 + 508 + if (qmu_status & RXQ_ZLPERR_INT) { 509 + errval = mtu3_readl(mbase, U3D_RQERRIR1); 510 + for (i = 1; i < mtu->num_eps; i++) { 511 + if (errval & QMU_RX_ZLP_ERR(i)) 512 + dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i); 513 + } 514 + mtu3_writel(mbase, U3D_RQERRIR1, errval); 515 + } 516 + 517 + if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) { 518 + errval = mtu3_readl(mbase, U3D_TQERRIR0); 519 + for (i = 1; i < mtu->num_eps; i++) { 520 + if (errval & QMU_TX_CS_ERR(i)) 521 + dev_err(mtu->dev, "Tx %d checksum error!\n", i); 522 + 523 + if (errval & QMU_TX_LEN_ERR(i)) 524 + qmu_tx_zlp_error_handler(mtu, i); 525 + } 526 + mtu3_writel(mbase, U3D_TQERRIR0, errval); 527 + } 528 + } 529 + 530 + irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu) 531 + { 532 + void __iomem *mbase = mtu->mac_base; 533 + u32 qmu_status; 534 + u32 qmu_done_status; 535 + 536 + /* U3D_QISAR1 is read update */ 537 + qmu_status = mtu3_readl(mbase, U3D_QISAR1); 538 + qmu_status &= mtu3_readl(mbase, U3D_QIER1); 539 + 540 + qmu_done_status = mtu3_readl(mbase, U3D_QISAR0); 541 + qmu_done_status &= mtu3_readl(mbase, U3D_QIER0); 542 + mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */ 543 + dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n", 544 + (qmu_done_status & 0xFFFF), qmu_done_status >> 16, 545 + qmu_status); 546 + 547 + if (qmu_done_status) 548 + qmu_done_isr(mtu, qmu_done_status); 549 + 550 + if (qmu_status) 551 + qmu_exception_isr(mtu, qmu_status); 552 + 553 + return IRQ_HANDLED; 554 + } 555 + 556 + int mtu3_qmu_init(struct mtu3 *mtu) 557 + { 558 + 559 + compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B"); 560 + 561 + mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev, 562 + QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0); 563 + 564 + if (!mtu->qmu_gpd_pool) 565 + return -ENOMEM; 566 + 567 + return 0; 568 + } 569 + 570 + void mtu3_qmu_exit(struct mtu3 *mtu) 571 + { 572 + dma_pool_destroy(mtu->qmu_gpd_pool); 573 + }
+43
drivers/usb/mtu3/mtu3_qmu.h
··· 1 + /* 2 + * mtu3_qmu.h - Queue Management Unit driver header 3 + * 4 + * Copyright (C) 2016 MediaTek Inc. 5 + * 6 + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 7 + * 8 + * This software is licensed under the terms of the GNU General Public 9 + * License version 2, as published by the Free Software Foundation, and 10 + * may be copied, distributed, and modified under those terms. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + */ 18 + 19 + #ifndef __MTK_QMU_H__ 20 + #define __MTK_QMU_H__ 21 + 22 + #define MAX_GPD_NUM 64 23 + #define QMU_GPD_SIZE (sizeof(struct qmu_gpd)) 24 + #define QMU_GPD_RING_SIZE (MAX_GPD_NUM * QMU_GPD_SIZE) 25 + 26 + #define GPD_BUF_SIZE 65532 27 + 28 + void mtu3_qmu_stop(struct mtu3_ep *mep); 29 + int mtu3_qmu_start(struct mtu3_ep *mep); 30 + void mtu3_qmu_resume(struct mtu3_ep *mep); 31 + void mtu3_qmu_flush(struct mtu3_ep *mep); 32 + 33 + void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq); 34 + int mtu3_prepare_transfer(struct mtu3_ep *mep); 35 + 36 + int mtu3_gpd_ring_alloc(struct mtu3_ep *mep); 37 + void mtu3_gpd_ring_free(struct mtu3_ep *mep); 38 + 39 + irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu); 40 + int mtu3_qmu_init(struct mtu3 *mtu); 41 + void mtu3_qmu_exit(struct mtu3 *mtu); 42 + 43 + #endif