// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - DesignWare USB3 DRD Controller Core file
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/reset.h>
#include <linux/bitfield.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>

#include "core.h"
#include "gadget.h"
#include "glue.h"
#include "io.h"

#include "debug.h"
#include "../host/xhci-ext-caps.h"

#define DWC3_DEFAULT_AUTOSUSPEND_DELAY	5000 /* ms */

/**
 * dwc3_get_dr_mode - Validates and sets dr_mode
 * @dwc: pointer to our context structure
 */
static int dwc3_get_dr_mode(struct dwc3 *dwc)
{
	enum usb_dr_mode mode;
	struct device *dev = dwc->dev;
	unsigned int hw_mode;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	mode = dwc->dr_mode;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	switch (hw_mode) {
	case DWC3_GHWPARAMS0_MODE_GADGET:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
			dev_err(dev,
				"Controller does not support host mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_PERIPHERAL;
		break;
	case DWC3_GHWPARAMS0_MODE_HOST:
		if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
			dev_err(dev,
				"Controller does not support device mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_HOST;
		break;
	default:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
			mode = USB_DR_MODE_HOST;
		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
			mode = USB_DR_MODE_PERIPHERAL;

		/*
		 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
		 * mode. If the controller supports DRD but the dr_mode is not
		 * specified or set to OTG, then set the mode to peripheral.
		 */
		if (mode == USB_DR_MODE_OTG && !dwc->edev &&
		    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
		     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
		    !DWC3_VER_IS_PRIOR(DWC3, 330A))
			mode = USB_DR_MODE_PERIPHERAL;
	}

	if (mode != dwc->dr_mode) {
		dev_warn(dev,
			 "Configuration mismatch. dr_mode forced to %s\n",
			 mode == USB_DR_MODE_HOST ? "host" : "gadget");

		dwc->dr_mode = mode;
	}

	return 0;
}

void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
{
	u32 reg;
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(i));
		if (enable && !dwc->dis_u3_susphy_quirk)
			reg |= DWC3_GUSB3PIPECTL_SUSPHY;
		else
			reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(i), reg);
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
		if (enable && !dwc->dis_u2_susphy_quirk)
			reg |= DWC3_GUSB2PHYCFG_SUSPHY;
		else
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
	}
}
EXPORT_SYMBOL_GPL(dwc3_enable_susphy);

void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
{
	unsigned int hw_mode;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);

	/*
	 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
	 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
	 * and they can be set after core initialization.
	 */
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
		if (DWC3_GCTL_PRTCAP(reg) != mode)
			dwc3_enable_susphy(dwc, false);
	}

	reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
	reg |= DWC3_GCTL_PRTCAPDIR(mode);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	dwc->current_dr_role = mode;
	trace_dwc3_set_prtcap(mode);
}
EXPORT_SYMBOL_GPL(dwc3_set_prtcap);

static void __dwc3_set_mode(struct work_struct *work)
{
	struct dwc3 *dwc = work_to_dwc(work);
	unsigned long flags;
	int ret;
	u32 reg;
	u32 desired_dr_role;
	int i;

	mutex_lock(&dwc->mutex);
	spin_lock_irqsave(&dwc->lock, flags);
	desired_dr_role = dwc->desired_dr_role;
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_get_sync(dwc->dev);

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
		dwc3_otg_update(dwc, 0);

	if (!desired_dr_role)
		goto out;

	if (desired_dr_role == dwc->current_dr_role)
		goto out;

	if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
		goto out;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		dwc3_host_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_gadget_exit(dwc);
		dwc3_event_buffers_cleanup(dwc);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_exit(dwc);
		spin_lock_irqsave(&dwc->lock, flags);
		dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc3_otg_update(dwc, 1);
		break;
	default:
		break;
	}

	/*
	 * When current_dr_role is not set, there's no role switching.
	 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
	 */
	if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
			DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
			desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg |= DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);

		/*
		 * Wait for internal clocks to synchronize. DWC_usb31 and
		 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
		 * keep it consistent across different IPs, let's wait up to
		 * 100ms before clearing GCTL.CORESOFTRESET.
		 */
		msleep(100);

		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg &= ~DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_set_prtcap(dwc, desired_dr_role, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	switch (desired_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dwc->dev, "failed to initialize host\n");
		} else {
			if (dwc->usb2_phy)
				otg_set_vbus(dwc->usb2_phy->otg, true);

			for (i = 0; i < dwc->num_usb2_ports; i++)
				phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
			for (i = 0; i < dwc->num_usb3_ports; i++)
				phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);

			if (dwc->dis_split_quirk) {
				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
				reg |= DWC3_GUCTL3_SPLITDISABLE;
				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
			}
		}
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			dev_err(dwc->dev, "failed to initialize peripheral\n");
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_init(dwc);
		dwc3_otg_update(dwc, 0);
		break;
	default:
		break;
	}

out:
	pm_runtime_put_autosuspend(dwc->dev);
	mutex_unlock(&dwc->mutex);
}

void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
	unsigned long flags;

	if (dwc->dr_mode != USB_DR_MODE_OTG)
		return;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->desired_dr_role = mode;
	spin_unlock_irqrestore(&dwc->lock, flags);

	queue_work(system_freezable_wq, &dwc->drd_work);
}

u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
			DWC3_GDBGFIFOSPACE_NUM(dep->number) |
			DWC3_GDBGFIFOSPACE_TYPE(type));

	reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);

	return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
}

/**
 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 * @dwc: pointer to our context structure
 */
int dwc3_core_soft_reset(struct dwc3 *dwc)
{
	u32 reg;
	int retries = 1000;

	/*
	 * We're resetting only the device side because, if we're in host mode,
	 * the XHCI driver will reset the host block. If dwc3 was configured
	 * for host-only mode, then we can return early.
	 */
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg |= DWC3_DCTL_CSFTRST;
	reg &= ~DWC3_DCTL_RUN_STOP;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	/*
	 * For DWC_usb31 controllers 1.90a and later, the DCTL.CSFTRST bit
	 * is cleared only after all the clocks are synchronized. This can
	 * take a little more than 50ms. Poll every 20ms, up to 10 times,
	 * instead.
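	 * (Worst case that is 10 * 20ms = 200ms of sleeping, versus up to
	 * 1000 * 1us of busy-waiting on the other cores.)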
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
		retries = 10;

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		if (!(reg & DWC3_DCTL_CSFTRST))
			goto done;

		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
			msleep(20);
		else
			udelay(1);
	} while (--retries);

	dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
	return -ETIMEDOUT;

done:
	/*
	 * For DWC_usb31 controllers 1.80a and prior, once the DCTL.CSFTRST
	 * bit is cleared, we must wait at least 50ms before accessing the
	 * PHY domain (synchronization delay).
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
		msleep(50);

	return 0;
}

/*
 * dwc3_frame_length_adjustment - Adjusts frame length if required
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
	u32 reg;
	u32 dft;

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	if (dwc->fladj == 0)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
	if (dft != dwc->fladj) {
		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
	}
}

/**
 * dwc3_ref_clk_period - Reference clock period configuration
 *		Default reference clock period depends on hardware
 *		configuration. For systems with reference clock that differs
 *		from the default, this will set clock period in DWC3_GUCTL
 *		register.
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
	unsigned long period;
	unsigned long fladj;
	unsigned long decr;
	unsigned long rate;
	u32 reg;

	if (dwc->ref_clk) {
		rate = clk_get_rate(dwc->ref_clk);
		if (!rate)
			return;
		period = NSEC_PER_SEC / rate;
	} else if (dwc->ref_clk_per) {
		period = dwc->ref_clk_per;
		rate = NSEC_PER_SEC / period;
	} else {
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
	reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
	reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	/*
	 * The calculation below is
	 *
	 *     125000 * (NSEC_PER_SEC / (rate * period) - 1)
	 *
	 * but rearranged for fixed-point arithmetic. The division must be
	 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
	 * neither does rate * period).
	 *
	 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
	 * nanoseconds of error caused by the truncation which happened during
	 * the division when calculating rate or period (whichever one was
	 * derived from the other). We first calculate the relative error, then
	 * scale it to units of 8 ppm.
	 */
	fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
	fladj -= 125000;

	/*
	 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
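	 *
	 * Putting the numbers together for an assumed 24 MHz reference clock
	 * (purely illustrative, nothing in this file implies that rate):
	 * period = 1000000000 / 24000000 = 41 ns, so rate * period = 984000000
	 * and fladj = 125000 * 1000000000 / 984000000 - 125000 = 2032, i.e.
	 * the ~1.6% truncation error expressed in units of 8 ppm. decr below
	 * becomes 480000000 / 24000000 = 20, which programs 240MHZDECR = 10
	 * and PLS1 = 0.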
	 */
	decr = 480000000 / rate;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
	    &  ~DWC3_GFLADJ_240MHZDECR
	    &  ~DWC3_GFLADJ_240MHZDECR_PLS1;
	reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
	    |  FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
	    |  FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);

	if (dwc->gfladj_refclk_lpm_sel)
		reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;

	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}

/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}

/**
 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
 * @dwc: Pointer to our controller context structure
 * @length: size of the event buffer
 *
 * Returns a pointer to the allocated event buffer structure on success
 * otherwise ERR_PTR(errno).
 */
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned int length)
{
	struct dwc3_event_buffer *evt;

	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc = dwc;
	evt->length = length;
	evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
	if (!evt->cache)
		return ERR_PTR(-ENOMEM);

	evt->buf = dma_alloc_coherent(dwc->sysdev, length,
			&evt->dma, GFP_KERNEL);
	if (!evt->buf)
		return ERR_PTR(-ENOMEM);

	return evt;
}

/**
 * dwc3_free_event_buffers - frees all allocated event buffers
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	if (evt)
		dwc3_free_one_event_buffer(dwc, evt);
}

/**
 * dwc3_alloc_event_buffers - Allocates one event buffer of size @length
 * @dwc: pointer to our controller context structure
 * @length: size of event buffer
 *
 * Returns 0 on success otherwise negative errno. In the error case, dwc
 * may contain some buffers allocated but not all which were requested.
 */
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
{
	struct dwc3_event_buffer *evt;
	unsigned int hw_mode;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
		dwc->ev_buf = NULL;
		return 0;
	}

	evt = dwc3_alloc_one_event_buffer(dwc, length);
	if (IS_ERR(evt)) {
		dev_err(dwc->dev, "can't allocate event buffer\n");
		return PTR_ERR(evt);
	}
	dwc->ev_buf = evt;

	return 0;
}

/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;
	u32 reg;

	if (!dwc->ev_buf)
		return 0;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));

	/* Clear any stale event */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
	return 0;
}

void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;
	u32 reg;

	if (!dwc->ev_buf)
		return;
	/*
	 * Exynos platforms may not be able to access event buffer if the
	 * controller failed to halt on dwc3_core_exit().
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	if (!(reg & DWC3_DSTS_DEVCTRLHLT))
		return;

	evt = dwc->ev_buf;

	evt->lpos = 0;

	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
			| DWC3_GEVNTSIZ_SIZE(0));

	/* Clear any stale event */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
}

static void dwc3_core_num_eps(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	dwc->num_eps = DWC3_NUM_EPS(parms);
}

static void dwc3_cache_hwparams(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);

	if (DWC3_IP_IS(DWC32))
		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
}

static void dwc3_config_soc_bus(struct dwc3 *dwc)
{
	if (dwc->gsbuscfg0_reqinfo != DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) {
		u32 reg;

		reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
		reg &= ~DWC3_GSBUSCFG0_REQINFO(~0);
		reg |= DWC3_GSBUSCFG0_REQINFO(dwc->gsbuscfg0_reqinfo);
		dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
	}
}

static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
	int intf;
	int ret = 0;

	intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);

	if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
	    (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
	     dwc->hsphy_interface &&
	     !strncmp(dwc->hsphy_interface, "ulpi", 4)))
		ret = dwc3_ulpi_init(dwc);

	return ret;
}

static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(index));

	/*
	 * Make sure UX_EXIT_PX is cleared as that causes issues with some
	 * PHYs. Also, this bit is not supposed to be used in normal operation.
	 */
	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;

	/* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init.
	 */
	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->u2ss_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;

	if (dwc->dis_rxdet_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;

	if (dwc->req_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;

	if (dwc->del_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;

	if (dwc->del_phy_power_chg_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;

	if (dwc->lfps_filter_quirk)
		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;

	if (dwc->rx_detect_poll_quirk)
		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;

	if (dwc->tx_de_emphasis_quirk)
		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);

	if (dwc->dis_del_phy_power_chg_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(index), reg);

	return 0;
}

static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(index));

	/* Select the HS PHY interface */
	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
		if (dwc->hsphy_interface &&
		    !strncmp(dwc->hsphy_interface, "utmi", 4)) {
			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
			break;
		} else if (dwc->hsphy_interface &&
			   !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
		} else {
			/* Relying on default value. */
			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
				break;
		}
		fallthrough;
	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
	default:
		break;
	}

	switch (dwc->hsphy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
		break;
	default:
		break;
	}

	/* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_enblslpm_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	else
		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;

	if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;

	/*
	 * Some ULPI USB PHYs do not support an internal VBUS supply; driving
	 * the CPEN pin requires the ULPI DRVVBUSEXTERNAL bit of the OTG_CTRL
	 * register to be configured. The controller sets the USB2 PHY
	 * ULPIEXTVBUSDRV bit[17] of the GUSB2PHYCFG register to drive VBUS
	 * with an external supply.
	 */
	if (dwc->ulpi_ext_vbus_drv)
		reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);

	return 0;
}

/**
 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success. The USB PHY interfaces are configured but not
 * initialized. The PHY interfaces and the PHYs get initialized together with
 * the core in dwc3_core_init.
 */
static int dwc3_phy_setup(struct dwc3 *dwc)
{
	int i;
	int ret;

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		ret = dwc3_ss_phy_setup(dwc, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = dwc3_hs_phy_setup(dwc, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_phy_init(struct dwc3 *dwc)
{
	int ret;
	int i;
	int j;

	usb_phy_init(dwc->usb2_phy);
	usb_phy_init(dwc->usb3_phy);

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = phy_init(dwc->usb2_generic_phy[i]);
		if (ret < 0)
			goto err_exit_usb2_phy;
	}

	for (j = 0; j < dwc->num_usb3_ports; j++) {
		ret = phy_init(dwc->usb3_generic_phy[j]);
		if (ret < 0)
			goto err_exit_usb3_phy;
	}

	/*
	 * Above DWC_usb3.0 1.94a, it is recommended to set
	 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
	 * coreConsultant configuration. So the default value will be '0' when
	 * the core is reset. The application needs to set it to '1' after the
	 * core initialization is completed.
	 *
	 * Certain PHYs require being in the P0 power state during
	 * initialization. Make sure GUSB3PIPECTL.SUSPENDENABLE and
	 * GUSB2PHYCFG.SUSPHY are clear prior to PHY init to maintain the P0
	 * state.
	 *
	 * After PHY initialization, some PHY operations can only be executed
	 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
	 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
	 * blocking PHY ops.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		dwc3_enable_susphy(dwc, true);

	return 0;

err_exit_usb3_phy:
	while (--j >= 0)
		phy_exit(dwc->usb3_generic_phy[j]);

err_exit_usb2_phy:
	while (--i >= 0)
		phy_exit(dwc->usb2_generic_phy[i]);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);

	return ret;
}

static void dwc3_phy_exit(struct dwc3 *dwc)
{
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++)
		phy_exit(dwc->usb3_generic_phy[i]);

	for (i = 0; i < dwc->num_usb2_ports; i++)
		phy_exit(dwc->usb2_generic_phy[i]);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);
}

static int dwc3_phy_power_on(struct dwc3 *dwc)
{
	int ret;
	int i;
	int j;

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = phy_power_on(dwc->usb2_generic_phy[i]);
		if (ret < 0)
			goto err_power_off_usb2_phy;
	}

	for (j = 0; j < dwc->num_usb3_ports; j++) {
		ret = phy_power_on(dwc->usb3_generic_phy[j]);
		if (ret < 0)
			goto err_power_off_usb3_phy;
	}

	return 0;

err_power_off_usb3_phy:
	while (--j >= 0)
		phy_power_off(dwc->usb3_generic_phy[j]);

err_power_off_usb2_phy:
	while (--i >= 0)
		phy_power_off(dwc->usb2_generic_phy[i]);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);

	return ret;
}

static void dwc3_phy_power_off(struct dwc3 *dwc)
{
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++)
		phy_power_off(dwc->usb3_generic_phy[i]);

	for (i = 0; i < dwc->num_usb2_ports; i++)
		phy_power_off(dwc->usb2_generic_phy[i]);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);
}

static int dwc3_clk_enable(struct dwc3 *dwc)
{
	int ret;

	ret = clk_prepare_enable(dwc->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dwc->ref_clk);
	if (ret)
		goto disable_bus_clk;

	ret = clk_prepare_enable(dwc->susp_clk);
	if (ret)
		goto disable_ref_clk;

	ret = clk_prepare_enable(dwc->utmi_clk);
	if (ret)
		goto disable_susp_clk;

	ret = clk_prepare_enable(dwc->pipe_clk);
	if (ret)
		goto disable_utmi_clk;

	return 0;

disable_utmi_clk:
	clk_disable_unprepare(dwc->utmi_clk);
disable_susp_clk:
	clk_disable_unprepare(dwc->susp_clk);
disable_ref_clk:
	clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
	clk_disable_unprepare(dwc->bus_clk);
	return ret;
}

static void dwc3_clk_disable(struct dwc3 *dwc)
{
	clk_disable_unprepare(dwc->pipe_clk);
	clk_disable_unprepare(dwc->utmi_clk);
	clk_disable_unprepare(dwc->susp_clk);
	clk_disable_unprepare(dwc->ref_clk);
	clk_disable_unprepare(dwc->bus_clk);
}

void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_clk_disable(dwc);
	reset_control_assert(dwc->reset);
}
EXPORT_SYMBOL_GPL(dwc3_core_exit);

static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	dwc->ip = DWC3_GSNPS_ID(reg);
	if (dwc->ip == DWC4_IP)
		dwc->ip = DWC32_IP;

	/* This should read as U3 followed by revision number */
	if (DWC3_IP_IS(DWC3)) {
		dwc->revision = reg;
	} else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
	} else {
		return false;
	}

	return true;
}

static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
	unsigned int power_opt;
	unsigned int hw_mode;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);

	switch (power_opt) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/**
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
				dwc->dr_mode == USB_DR_MODE_OTG) &&
				DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		/* nothing */
		break;
	}

	/*
	 * This is a workaround for STAR#4846132, which only affects
	 * DWC_usb31 version 2.00a operating in host mode.
	 *
	 * There is a problem in DWC_usb31 version 2.00a operating
	 * in host mode that would cause a CSR read timeout when a CSR
	 * read coincides with RAM Clock Gating Entry.
	 * Disabling Clock Gating avoids this, at the cost of some extra
	 * power consumption during normal operation.
	 */
	if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
	    hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
		reg |= DWC3_GCTL_DSBLCLKGTNG;

	/* check if current dwc3 is on simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dev_info(dwc->dev, "Running with FPGA optimizations\n");
		dwc->is_fpga = true;
	}

	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions < 1.90a have a bug
	 * where the device can fail to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);

/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle the property "snps,incr-burst-type-adjustment".
	 * Get the number of values from this property:
	 * result <= 0, means this property is not supported.
	 * result = 1, means INCRx burst mode supported.
	 * result > 1, means undefined length burst mode supported.
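	 *
	 * For example (illustrative values only, see the DT binding for the
	 * authoritative description):
	 *
	 *     snps,incr-burst-type-adjustment = <1 4 8 16>;
	 *
	 * yields four values, so undefined length burst mode is enabled and
	 * the largest value (16) selects the INCR16 burst enable bit below.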
	 */
	ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
	if (ntype <= 0)
		return;

	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
	if (!vals)
		return;

	/* Get INCR burst type, and parse it */
	ret = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", vals, ntype);
	if (ret) {
		kfree(vals);
		dev_err(dev, "Error to get property\n");
		return;
	}

	incrx_size = *vals;

	if (ntype > 1) {
		/* INCRX (undefined length) burst mode */
		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
		for (i = 1; i < ntype; i++) {
			if (vals[i] > incrx_size)
				incrx_size = vals[i];
		}
	} else {
		/* INCRX burst mode */
		incrx_mode = INCRX_BURST_MODE;
	}

	kfree(vals);

	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
	if (incrx_mode)
		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
	switch (incrx_size) {
	case 256:
		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
		break;
	case 128:
		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
		break;
	case 64:
		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
		break;
	case 32:
		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
		break;
	case 16:
		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
		break;
	case 8:
		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
		break;
	case 4:
		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
		break;
	case 1:
		break;
	default:
		dev_err(dev, "Invalid property\n");
		break;
	}

	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}

static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
{
	u32 scale;
	u32 reg;

	if (!dwc->susp_clk)
		return;

	/*
	 * The power down scale field specifies how many suspend_clk
	 * periods fit into a 16KHz clock period. When performing
	 * the division, round up the remainder.
	 *
	 * The power down scale value is calculated using the fastest
	 * frequency of the suspend_clk. If it isn't fixed (but within
	 * the accuracy requirement), the driver may not know the max
	 * rate of the suspend_clk, so only update the power down scale
	 * if the default is less than the calculated value from
	 * clk_get_rate() or if the default is questionably high
	 * (3x or more) to be within the requirement.
	 */
	scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
	    (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
		reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
		reg |= DWC3_GCTL_PWRDNSCALE(scale);
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}
}

static void dwc3_config_threshold(struct dwc3 *dwc)
{
	u32 reg;
	u8 rx_thr_num;
	u8 rx_maxburst;
	u8 tx_thr_num;
	u8 tx_maxburst;

	/*
	 * Both the number of packets and the max burst settings must be
	 * configured to enable the RX and/or TX threshold.
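	 * For example (illustrative only), specifying snps,rx-thr-num-pkt
	 * without also specifying snps,rx-max-burst leaves the RX threshold
	 * logic untouched.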
	 */
	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
		rx_thr_num = dwc->rx_thr_num_pkt_prd;
		rx_maxburst = dwc->rx_max_burst_prd;
		tx_thr_num = dwc->tx_thr_num_pkt_prd;
		tx_maxburst = dwc->tx_max_burst_prd;

		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_RXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);

			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_TXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);

			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}

	rx_thr_num = dwc->rx_thr_num_pkt;
	rx_maxburst = dwc->rx_max_burst;
	tx_thr_num = dwc->tx_thr_num_pkt;
	tx_maxburst = dwc->tx_max_burst;

	if (DWC3_IP_IS(DWC3)) {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC3_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC3_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	} else {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}
}

/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_core_init(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;
	int ret;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	/*
	 * Write the Linux version code to our GUID register so it's easy to
	 * figure out in which kernel version a bug was found.
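	 * For example, a v6.19.0 kernel writes KERNEL_VERSION(6, 19, 0), so
	 * GUID would read back as 0x00061300.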
	 */
	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);

	ret = dwc3_phy_setup(dwc);
	if (ret)
		return ret;

	if (!dwc->ulpi_ready) {
		ret = dwc3_core_ulpi_init(dwc);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				dwc3_core_soft_reset(dwc);
				ret = -EPROBE_DEFER;
			}
			return ret;
		}
		dwc->ulpi_ready = true;
	}

	if (!dwc->phys_ready) {
		ret = dwc3_core_get_phy(dwc);
		if (ret)
			goto err_exit_ulpi;
		dwc->phys_ready = true;
	}

	ret = dwc3_phy_init(dwc);
	if (ret)
		goto err_exit_ulpi;

	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err_exit_phy;

	dwc3_core_setup_global_control(dwc);
	dwc3_core_num_eps(dwc);

	/* Set power down scale of suspend_clk */
	dwc3_set_power_down_clk_scale(dwc);

	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc);

	/* Adjust Reference Clock Period */
	dwc3_ref_clk_period(dwc);

	dwc3_set_incr_burst_type(dwc);

	dwc3_config_soc_bus(dwc);

	ret = dwc3_phy_power_on(dwc);
	if (ret)
		goto err_exit_phy;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err_power_off_phy;
	}

	/*
	 * ENDXFER polling is available on version 3.10a and later of
	 * the DWC_usb3 controller. It is NOT available in the
	 * DWC_usb31 controller.
	 */
	if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg |= DWC3_GUCTL2_RST_ACTBITLATER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	/*
	 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
	 * only. If the PM TIMER ECN is enabled through GUCTL2[19], the
	 * link compliance test (TD7.21) may fail. If the ECN is not
	 * enabled (GUCTL2[19] = 0), the controller will use the old timer
	 * value (5us), which is still acceptable for the link compliance
	 * test. Therefore, do not enable the PM TIMER ECN in 3.20a by
	 * setting GUCTL2[19]; instead, keep GUCTL2[19] = 0.
	 */
	if (DWC3_VER_IS(DWC3, 320A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg &= ~DWC3_GUCTL2_LC_TIMER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	/*
	 * When configured in HOST mode, after issuing U3/L2 exit the
	 * controller fails to send the proper CRC checksum in the CRC5
	 * field. Because of this behaviour a Transaction Error is generated,
	 * resulting in reset and re-enumeration of the attached USB device.
	 * termsel, xcvrsel and opmode all become 0 at the end of resume.
	 * Enabling bit 10 of GUCTL1 corrects this problem. This option is to
	 * support certain legacy ULPI PHYs.
	 */
	if (dwc->resume_hs_terminations) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
		reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);

		/*
		 * Enable hardware control of sending remote wakeup
		 * in HS when the device is in the L1 state.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;

		/*
		 * Decouple USB 2.0 L1 & L2 events, which allows the
		 * gadget driver to only receive U3/L2 suspend & wakeup
		 * events and prevents the more frequent L1 LPM transitions
		 * from interrupting the driver.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;

		if (dwc->dis_tx_ipgap_linecheck_quirk)
			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;

		if (dwc->parkmode_disable_ss_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;

		if (dwc->parkmode_disable_hs_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;

		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY)) {
			if (dwc->maximum_speed == USB_SPEED_FULL ||
			    dwc->maximum_speed == USB_SPEED_HIGH)
				reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
			else
				reg &= ~DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
		}

		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	dwc3_config_threshold(dwc);

	if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
	    (DWC3_IP_IS(DWC31)) &&
	    dwc->maximum_speed == USB_SPEED_SUPER) {
		int i;

		for (i = 0; i < dwc->num_usb3_ports; i++) {
			reg = dwc3_readl(dwc->regs, DWC3_LLUCTL(i));
			reg |= DWC3_LLUCTL_FORCE_GEN1;
			dwc3_writel(dwc->regs, DWC3_LLUCTL(i), reg);
		}
	}

	/*
	 * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and
	 * prior. When an active endpoint not currently cached in the host
	 * controller is chosen to be cached to the same index as an endpoint
	 * receiving NAKs, the endpoint receiving NAKs enters continuous
	 * retry mode. This prevents it from being evicted from the host
	 * controller cache, blocking the new endpoint from being cached and
	 * serviced.
	 *
	 * To resolve this, for controller versions 1.70a and 1.80a, set the
	 * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit
	 * disables the USB2.0 internal retry feature. The GUCTL3[16] register
	 * function is available only from version 1.70a.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}

	return 0;

err_power_off_phy:
	dwc3_phy_power_off(dwc);
err_exit_phy:
	dwc3_phy_exit(dwc);
err_exit_ulpi:
	dwc3_ulpi_exit(dwc);

	return ret;
}
EXPORT_SYMBOL_GPL(dwc3_core_init);

static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *node = dev->of_node;
	char phy_name[9];
	int ret;
	u8 i;

	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb2_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb3_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		if (dwc->num_usb2_ports == 1)
			snprintf(phy_name, sizeof(phy_name), "usb2-phy");
		else
			snprintf(phy_name, sizeof(phy_name), "usb2-%u", i);

		dwc->usb2_generic_phy[i] = devm_phy_get(dev, phy_name);
		if (IS_ERR(dwc->usb2_generic_phy[i])) {
			ret = PTR_ERR(dwc->usb2_generic_phy[i]);
			if (ret == -ENOSYS || ret == -ENODEV)
				dwc->usb2_generic_phy[i] = NULL;
			else
				return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
						     phy_name);
		}
	}

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		if (dwc->num_usb3_ports == 1)
			snprintf(phy_name, sizeof(phy_name), "usb3-phy");
		else
			snprintf(phy_name, sizeof(phy_name), "usb3-%u", i);

		dwc->usb3_generic_phy[i] = devm_phy_get(dev, phy_name);
		if (IS_ERR(dwc->usb3_generic_phy[i])) {
			ret = PTR_ERR(dwc->usb3_generic_phy[i]);
			if (ret == -ENOSYS || ret == -ENODEV)
				dwc->usb3_generic_phy[i] = NULL;
			else
				return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
						     phy_name);
		}
	}

	return 0;
}

static int dwc3_core_init_mode(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;
	int i;

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
		break;
	case USB_DR_MODE_HOST:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, true);
		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);

		ret = dwc3_host_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize host\n");
		break;
	case USB_DR_MODE_OTG:
		INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
		ret = dwc3_drd_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
		break;
	default:
		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
		return -EINVAL;
	}

	return 0;
}

static void dwc3_core_exit_mode(struct dwc3 *dwc)
{
	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_gadget_exit(dwc);
		break;
	case USB_DR_MODE_HOST:
		dwc3_host_exit(dwc);
		break;
	case USB_DR_MODE_OTG:
		dwc3_drd_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	/* de-assert DRVVBUS for HOST and OTG mode */
	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
}

static void dwc3_get_software_properties(struct dwc3 *dwc,
					 const struct dwc3_properties *properties)
{
	struct device *tmpdev;
	u16 gsbuscfg0_reqinfo;
	int ret;

	dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED;

	if (properties->gsbuscfg0_reqinfo !=
	    DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) {
		dwc->gsbuscfg0_reqinfo = properties->gsbuscfg0_reqinfo;
		return;
	}

	/*
	 * Iterate over all parent nodes for finding swnode properties
	 * and non-DT (non-ABI) properties.
	 */
	for (tmpdev = dwc->dev; tmpdev; tmpdev = tmpdev->parent) {
		ret = device_property_read_u16(tmpdev,
					       "snps,gsbuscfg0-reqinfo",
					       &gsbuscfg0_reqinfo);
		if (!ret)
			dwc->gsbuscfg0_reqinfo = gsbuscfg0_reqinfo;
	}
}

static void dwc3_get_properties(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	u8 lpm_nyet_threshold;
	u8 tx_de_emphasis;
	u8 hird_threshold;
	u8 rx_thr_num_pkt = 0;
	u8 rx_max_burst = 0;
	u8 tx_thr_num_pkt = 0;
	u8 tx_max_burst = 0;
	u8 rx_thr_num_pkt_prd = 0;
	u8 rx_max_burst_prd = 0;
	u8 tx_thr_num_pkt_prd = 0;
	u8 tx_max_burst_prd = 0;
	u8 tx_fifo_resize_max_num;
	u16 num_hc_interrupters;

	/* default to highest possible threshold */
	lpm_nyet_threshold = 0xf;

	/* default to -3.5dB de-emphasis */
	tx_de_emphasis = 1;

	/*
	 * default to assert utmi_sleep_n and use maximum allowed HIRD
	 * threshold value of 0b1100
	 */
	hird_threshold = 12;

	/*
	 * default to a TXFIFO size large enough to fit 6 max packets. This
	 * allows for systems with larger bus latencies to have some headroom
	 * for endpoints that have a large bMaxBurst value.
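	 * (With 1024 byte SuperSpeed bulk packets, for instance, that is
	 * roughly 6 KiB of TXFIFO space per endpoint.)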
	 */
	tx_fifo_resize_max_num = 6;

	/* default to a single XHCI interrupter */
	num_hc_interrupters = 1;

	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);
	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);

	dwc->sysdev_is_parent = device_property_read_bool(dev,
				"linux,sysdev_is_parent");
	if (dwc->sysdev_is_parent)
		dwc->sysdev = dwc->dev->parent;
	else
		dwc->sysdev = dwc->dev;

	dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);

	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
				"snps,dis-start-transfer-quirk");
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");
	dwc->usb2_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-lpm-disable");
	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-gadget-lpm-disable");
	device_property_read_u8(dev, "snps,rx-thr-num-pkt",
				&rx_thr_num_pkt);
	device_property_read_u8(dev, "snps,rx-max-burst",
				&rx_max_burst);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt",
				&tx_thr_num_pkt);
	device_property_read_u8(dev, "snps,tx-max-burst",
				&tx_max_burst);
	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
				&rx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,rx-max-burst-prd",
				&rx_max_burst_prd);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
				&tx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,tx-max-burst-prd",
				&tx_max_burst_prd);
	device_property_read_u16(dev, "num-hc-interrupters",
				 &num_hc_interrupters);
	/* DWC3 core allowed to have a max of 8 interrupters */
	if (num_hc_interrupters > 8)
		num_hc_interrupters = 8;

	dwc->do_fifo_resize = device_property_read_bool(dev,
				"tx-fifo-resize");
	if (dwc->do_fifo_resize)
		device_property_read_u8(dev, "tx-fifo-max-num",
					&tx_fifo_resize_max_num);

	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");
	dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u1-entry-quirk");
	dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u2-entry-quirk");
	dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
				"snps,dis_rxdet_inp3_quirk");
	dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
				"snps,dis-u2-freeclk-exists-quirk");
	dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,dis-del-phy-power-chg-quirk");
	dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
				"snps,dis-tx-ipgap-linecheck-quirk");
	dwc->resume_hs_terminations = device_property_read_bool(dev,
				"snps,resume-hs-terminations");
	dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
				"snps,ulpi-ext-vbus-drv");
	dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-ss-quirk");
	dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-hs-quirk");
	dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
				"snps,gfladj-refclk-lpm-sel-quirk");

	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
				"snps,tx_de_emphasis_quirk");
	device_property_read_u8(dev, "snps,tx_de_emphasis",
				&tx_de_emphasis);
	device_property_read_string(dev, "snps,hsphy_interface",
				    &dwc->hsphy_interface);
	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
				 &dwc->fladj);
	device_property_read_u32(dev, "snps,ref-clock-period-ns",
				 &dwc->ref_clk_per);

	dwc->dis_metastability_quirk = device_property_read_bool(dev,
				"snps,dis_metastability_quirk");

	dwc->dis_split_quirk = device_property_read_bool(dev,
				"snps,dis-split-quirk");

	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
	dwc->tx_de_emphasis = tx_de_emphasis;

	dwc->hird_threshold = hird_threshold;

	dwc->rx_thr_num_pkt = rx_thr_num_pkt;
	dwc->rx_max_burst = rx_max_burst;

	dwc->tx_thr_num_pkt = tx_thr_num_pkt;
	dwc->tx_max_burst = tx_max_burst;

	dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
	dwc->rx_max_burst_prd = rx_max_burst_prd;

	dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
	dwc->tx_max_burst_prd = tx_max_burst_prd;

	dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;

	dwc->num_hc_interrupters = num_hc_interrupters;
}

/* check whether the core supports IMOD */
bool dwc3_has_imod(struct dwc3 *dwc)
{
	return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
		DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
		DWC3_IP_IS(DWC32);
}

static void dwc3_check_params(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	unsigned int hwparam_gen =
		DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);

	/*
	 * Enable IMOD for all supporting controllers.
	 *
	 * Particularly, DWC_usb3 v3.00a must enable this feature for
	 * the following reason:
	 *
	 * Workaround for STAR 9000961433 which affects only version
	 * 3.00a of the DWC_usb3 core. This prevents the controller
	 * interrupt from being masked while handling events. IMOD
	 * allows us to work around this issue. Enable it for the
	 * affected version.
	 */
	if (dwc3_has_imod(dwc))
		dwc->imod_interval = 1;

	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_SUPER:
		if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
			dev_warn(dev, "UDC doesn't support Gen 1\n");
		break;
	case USB_SPEED_SUPER_PLUS:
		if ((DWC3_IP_IS(DWC32) &&
		     hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
		    (!DWC3_IP_IS(DWC32) &&
		     hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dev_warn(dev, "UDC doesn't support SSP\n");
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		switch (hwparam_gen) {
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
			if (DWC3_IP_IS(DWC32))
				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			else
				dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
			dwc->maximum_speed = USB_SPEED_HIGH;
			break;
		default:
			dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		}
		break;
	}

	/*
	 * Currently the controller does not have visibility into the HW
	 * parameter to determine the maximum number of lanes the HW supports.
	 * If the number of lanes is not specified in the device property, then
	 * set the default to support dual-lane for DWC_usb32 and single-lane
	 * for DWC_usb31 for super-speed-plus.
	 */
	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
		switch (dwc->max_ssp_rate) {
		case USB_SSP_GEN_2x1:
			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
				dev_warn(dev, "UDC only supports Gen 1\n");
			break;
		case USB_SSP_GEN_1x2:
		case USB_SSP_GEN_2x2:
			if (DWC3_IP_IS(DWC31))
				dev_warn(dev, "UDC only supports single lane\n");
			break;
		case USB_SSP_GEN_UNKNOWN:
		default:
			switch (hwparam_gen) {
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
				else
					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
				break;
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
				break;
			}
			break;
		}
	}
}

static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *np_phy;
	struct extcon_dev *edev = NULL;
	const char *name;

	if (device_property_present(dev, "extcon"))
		return extcon_get_edev_by_phandle(dev, 0);

	/*
	 * Device tree platforms should get extcon via phandle.
	 * On ACPI platforms, we get the name from a device property.
	 * This device property is for kernel internal use only and
	 * is expected to be set by the glue code.
	 */
	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
		return extcon_get_extcon_dev(name);

	/*
	 * Check explicitly if "usb-role-switch" is used since
	 * extcon_find_edev_by_node() can not be used to check the absence of
	 * an extcon device. In the absence of a device it will always return
	 * EPROBE_DEFER.
	 */
	if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
	    device_property_read_bool(dev, "usb-role-switch"))
		return NULL;

	/*
	 * Try to get an extcon device from the USB PHY controller's "port"
	 * node. Check if it has the "port" node first, to avoid printing the
	 * error message from underlying code, as it's a valid case: extcon
	 * device (and "port" node) may be missing in case of "usb-role-switch"
	 * or OTG mode.
	 */
	np_phy = of_parse_phandle(dev->of_node, "phys", 0);
	if (of_graph_is_present(np_phy)) {
		struct device_node *np_conn;

		np_conn = of_graph_get_remote_node(np_phy, -1, -1);
		if (np_conn)
			edev = extcon_find_edev_by_node(np_conn);
		of_node_put(np_conn);
	}
	of_node_put(np_phy);

	return edev;
}

static int dwc3_get_clocks(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;

	if (!dev->of_node)
		return 0;

	/*
	 * Clocks are optional, but new DT platforms should support all clocks
	 * as required by the DT-binding.
	 * Some devices have different clock names in legacy device trees,
	 * check for them to retain backwards compatibility.
	 */
	dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
	if (IS_ERR(dwc->bus_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
				"could not get bus clock\n");
	}

	if (dwc->bus_clk == NULL) {
		dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
		if (IS_ERR(dwc->bus_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
					"could not get bus clock\n");
		}
	}

	dwc->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(dwc->ref_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
				"could not get ref clock\n");
	}

	if (dwc->ref_clk == NULL) {
		dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
		if (IS_ERR(dwc->ref_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
					"could not get ref clock\n");
		}
	}

	dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
	if (IS_ERR(dwc->susp_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
				"could not get suspend clock\n");
	}

	if (dwc->susp_clk == NULL) {
		dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
		if (IS_ERR(dwc->susp_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
					"could not get suspend clock\n");
		}
	}

	/* specific to Rockchip RK3588 */
	dwc->utmi_clk = devm_clk_get_optional(dev, "utmi");
	if (IS_ERR(dwc->utmi_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk),
				"could not get utmi clock\n");
	}

	/* specific to Rockchip RK3588 */
	dwc->pipe_clk = devm_clk_get_optional(dev, "pipe");
	if (IS_ERR(dwc->pipe_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk),
				"could not get pipe clock\n");
	}

	return 0;
}

static int dwc3_get_num_ports(struct dwc3 *dwc)
{
	void __iomem *base;
	u8 major_revision;
	u32 offset;
	u32 val;

	/*
	 * Remap the xHCI address space to access the xHCI extended capability
	 * registers, since they are needed to get the number of ports present.
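	 *
	 * Each xHCI "Supported Protocol" extended capability starts with a
	 * dword whose bits 31:24 carry the protocol major revision (0x02 for
	 * USB 2.x ports, 0x03 for USB 3.x ports), while bits 15:8 of the
	 * dword at offset 0x08 carry the number of ports covered by that
	 * capability; XHCI_EXT_PORT_MAJOR() and XHCI_EXT_PORT_COUNT() below
	 * extract exactly those two fields.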
	 */
	base = ioremap(dwc->xhci_resources[0].start,
			resource_size(&dwc->xhci_resources[0]));
	if (!base)
		return -ENOMEM;

	offset = 0;
	do {
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(base + offset);
		major_revision = XHCI_EXT_PORT_MAJOR(val);

		val = readl(base + offset + 0x08);
		if (major_revision == 0x03) {
			dwc->num_usb3_ports += XHCI_EXT_PORT_COUNT(val);
		} else if (major_revision <= 0x02) {
			dwc->num_usb2_ports += XHCI_EXT_PORT_COUNT(val);
		} else {
			dev_warn(dwc->dev, "unrecognized port major revision %d\n",
				 major_revision);
		}
	} while (1);

	dev_dbg(dwc->dev, "hs-ports: %u ss-ports: %u\n",
		dwc->num_usb2_ports, dwc->num_usb3_ports);

	iounmap(base);

	if (dwc->num_usb2_ports > DWC3_USB2_MAX_PORTS ||
	    dwc->num_usb3_ports > DWC3_USB3_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
{
	struct power_supply *usb_psy;
	const char *usb_psy_name;
	int ret;

	ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
	if (ret < 0)
		return NULL;

	usb_psy = power_supply_get_by_name(usb_psy_name);
	if (!usb_psy)
		return ERR_PTR(-EPROBE_DEFER);

	return usb_psy;
}

int dwc3_core_probe(const struct dwc3_probe_data *data)
{
	struct dwc3 *dwc = data->dwc;
	struct device *dev = dwc->dev;
	struct resource dwc_res;
	unsigned int hw_mode;
	void __iomem *regs;
	struct resource *res = data->res;
	int ret;

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
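	 *
	 * For illustration (using the offsets defined in core.h), the xHCI
	 * register space sits at the start of the memory resource while the
	 * global registers live in their own window, so the mapping below
	 * intentionally starts past the xHCI window:
	 *
	 *	res->start + 0                        xHCI registers (xhci-plat)
	 *	res->start + DWC3_GLOBALS_REGS_START  DWC3 global registers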
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	if (dev->of_node) {
		struct device_node *parent = of_get_parent(dev->of_node);

		if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
			dwc_res.start -= DWC3_GLOBALS_REGS_START;
			dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
		}

		of_node_put(parent);
	}

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs = regs;
	dwc->regs_size = resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	dwc3_get_software_properties(dwc, &data->properties);

	dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
	if (IS_ERR(dwc->usb_psy))
		return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");

	if (!data->ignore_clocks_and_resets) {
		dwc->reset = devm_reset_control_array_get_optional_shared(dev);
		if (IS_ERR(dwc->reset)) {
			ret = PTR_ERR(dwc->reset);
			goto err_put_psy;
		}

		ret = dwc3_get_clocks(dwc);
		if (ret)
			goto err_put_psy;
	}

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto err_put_psy;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto err_assert_reset;

	if (!dwc3_core_is_valid(dwc)) {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto err_disable_clks;
	}

	dev_set_drvdata(dev, dwc);
	dwc3_cache_hwparams(dwc);

	if (!dev_is_pci(dwc->sysdev) &&
	    DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
		if (ret)
			goto err_disable_clks;
	}

	/*
	 * Currently only DWC3 controllers that are host-only capable
	 * can have more than one port.
	 */
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
		ret = dwc3_get_num_ports(dwc);
		if (ret)
			goto err_disable_clks;
	} else {
		dwc->num_usb2_ports = 1;
		dwc->num_usb3_ports = 1;
	}

	spin_lock_init(&dwc->lock);
	mutex_init(&dwc->mutex);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err_allow_rpm;
	}

	dwc->edev = dwc3_get_extcon(dwc);
	if (IS_ERR(dwc->edev)) {
		ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
		goto err_free_event_buffers;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err_free_event_buffers;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err_probe(dev, ret, "failed to initialize core\n");
		goto err_free_event_buffers;
	}

	dwc3_check_params(dwc);
	dwc3_debugfs_init(dwc);

	if (!data->skip_core_init_mode) {
		ret = dwc3_core_init_mode(dwc);
		if (ret)
			goto err_exit_debugfs;
	}

	pm_runtime_put(dev);

	dma_set_max_seg_size(dev, UINT_MAX);

	return 0;

err_exit_debugfs:
	dwc3_debugfs_exit(dwc);
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_ulpi_exit(dwc);
err_free_event_buffers:
	dwc3_free_event_buffers(dwc);
err_allow_rpm:
	pm_runtime_allow(dev);
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
err_disable_clks:
	dwc3_clk_disable(dwc);
err_assert_reset:
	reset_control_assert(dwc->reset);
err_put_psy:
	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);

	return ret;
}
EXPORT_SYMBOL_GPL(dwc3_core_probe);

static int dwc3_probe(struct platform_device *pdev)
{
	struct dwc3_probe_data probe_data = {};
	struct resource *res;
	struct dwc3 *dwc;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc = devm_kzalloc(&pdev->dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->dev = &pdev->dev;
	dwc->glue_ops = NULL;

	probe_data.dwc = dwc;
	probe_data.res = res;
	probe_data.properties = DWC3_DEFAULT_PROPERTIES;

	return dwc3_core_probe(&probe_data);
}

void dwc3_core_remove(struct dwc3 *dwc)
{
	pm_runtime_get_sync(dwc->dev);

	dwc3_core_exit_mode(dwc);
	dwc3_debugfs_exit(dwc);

	dwc3_core_exit(dwc);
	dwc3_ulpi_exit(dwc);

	pm_runtime_allow(dwc->dev);
	pm_runtime_disable(dwc->dev);
	pm_runtime_dont_use_autosuspend(dwc->dev);
	pm_runtime_put_noidle(dwc->dev);
	/*
	 * HACK: Clear the driver data, which is currently accessed by parent
	 * glue drivers, before allowing the parent to suspend.
2392 */ 2393 dev_set_drvdata(dwc->dev, NULL); 2394 pm_runtime_set_suspended(dwc->dev); 2395 2396 dwc3_free_event_buffers(dwc); 2397 2398 if (dwc->usb_psy) 2399 power_supply_put(dwc->usb_psy); 2400} 2401EXPORT_SYMBOL_GPL(dwc3_core_remove); 2402 2403static void dwc3_remove(struct platform_device *pdev) 2404{ 2405 dwc3_core_remove(platform_get_drvdata(pdev)); 2406} 2407 2408#ifdef CONFIG_PM 2409static int dwc3_core_init_for_resume(struct dwc3 *dwc) 2410{ 2411 int ret; 2412 2413 ret = reset_control_deassert(dwc->reset); 2414 if (ret) 2415 return ret; 2416 2417 ret = dwc3_clk_enable(dwc); 2418 if (ret) 2419 goto assert_reset; 2420 2421 ret = dwc3_core_init(dwc); 2422 if (ret) 2423 goto disable_clks; 2424 2425 return 0; 2426 2427disable_clks: 2428 dwc3_clk_disable(dwc); 2429assert_reset: 2430 reset_control_assert(dwc->reset); 2431 2432 return ret; 2433} 2434 2435static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) 2436{ 2437 u32 reg; 2438 int i; 2439 int ret; 2440 2441 if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { 2442 dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & 2443 DWC3_GUSB2PHYCFG_SUSPHY) || 2444 (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) & 2445 DWC3_GUSB3PIPECTL_SUSPHY); 2446 /* 2447 * TI AM62 platform requires SUSPHY to be 2448 * enabled for system suspend to work. 2449 */ 2450 if (!dwc->susphy_state) 2451 dwc3_enable_susphy(dwc, true); 2452 } 2453 2454 switch (dwc->current_dr_role) { 2455 case DWC3_GCTL_PRTCAP_DEVICE: 2456 if (pm_runtime_suspended(dwc->dev)) 2457 break; 2458 ret = dwc3_gadget_suspend(dwc); 2459 if (ret) 2460 return ret; 2461 synchronize_irq(dwc->irq_gadget); 2462 dwc3_core_exit(dwc); 2463 break; 2464 case DWC3_GCTL_PRTCAP_HOST: 2465 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) { 2466 dwc3_core_exit(dwc); 2467 break; 2468 } 2469 2470 /* Let controller to suspend HSPHY before PHY driver suspends */ 2471 if (dwc->dis_u2_susphy_quirk || 2472 dwc->dis_enblslpm_quirk) { 2473 for (i = 0; i < dwc->num_usb2_ports; i++) { 2474 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 2475 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM | 2476 DWC3_GUSB2PHYCFG_SUSPHY; 2477 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 2478 } 2479 2480 /* Give some time for USB2 PHY to suspend */ 2481 usleep_range(5000, 6000); 2482 } 2483 2484 for (i = 0; i < dwc->num_usb2_ports; i++) 2485 phy_pm_runtime_put_sync(dwc->usb2_generic_phy[i]); 2486 for (i = 0; i < dwc->num_usb3_ports; i++) 2487 phy_pm_runtime_put_sync(dwc->usb3_generic_phy[i]); 2488 break; 2489 case DWC3_GCTL_PRTCAP_OTG: 2490 /* do nothing during runtime_suspend */ 2491 if (PMSG_IS_AUTO(msg)) 2492 break; 2493 2494 if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { 2495 ret = dwc3_gadget_suspend(dwc); 2496 if (ret) 2497 return ret; 2498 synchronize_irq(dwc->irq_gadget); 2499 } 2500 2501 dwc3_otg_exit(dwc); 2502 dwc3_core_exit(dwc); 2503 break; 2504 default: 2505 /* do nothing */ 2506 break; 2507 } 2508 2509 return 0; 2510} 2511 2512static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) 2513{ 2514 int ret; 2515 u32 reg; 2516 int i; 2517 2518 switch (dwc->current_dr_role) { 2519 case DWC3_GCTL_PRTCAP_DEVICE: 2520 ret = dwc3_core_init_for_resume(dwc); 2521 if (ret) 2522 return ret; 2523 2524 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); 2525 dwc3_gadget_resume(dwc); 2526 break; 2527 case DWC3_GCTL_PRTCAP_HOST: 2528 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) { 2529 ret = dwc3_core_init_for_resume(dwc); 2530 if (ret) 2531 return ret; 2532 dwc3_set_prtcap(dwc, 
					DWC3_GCTL_PRTCAP_HOST, true);
			break;
		}
		/* Restore GUSB2PHYCFG bits that were modified in suspend */
		for (i = 0; i < dwc->num_usb2_ports; i++) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
			if (dwc->dis_u2_susphy_quirk)
				reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

			if (dwc->dis_enblslpm_quirk)
				reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;

			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
		}

		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_pm_runtime_get_sync(dwc->usb2_generic_phy[i]);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_pm_runtime_get_sync(dwc->usb3_generic_phy[i]);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* nothing to do on runtime_resume */
		if (PMSG_IS_AUTO(msg))
			break;

		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, dwc->current_dr_role, true);

		dwc3_otg_init(dwc);
		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
			dwc3_otg_host_init(dwc);
		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			dwc3_gadget_resume(dwc);
		}

		break;
	default:
		/* do nothing */
		break;
	}

	if (!PMSG_IS_AUTO(msg)) {
		/* restore SUSPHY state to that before system suspend. */
		dwc3_enable_susphy(dwc, dwc->susphy_state);
	}

	return 0;
}

static int dwc3_runtime_checks(struct dwc3 *dwc)
{
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->connected)
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	return 0;
}

int dwc3_runtime_suspend(struct dwc3 *dwc)
{
	int ret;

	if (dwc3_runtime_checks(dwc))
		return -EBUSY;

	ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(dwc3_runtime_suspend);

int dwc3_runtime_resume(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;

	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
	if (ret)
		return ret;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->pending_events) {
			pm_runtime_put(dev);
			dwc->pending_events = false;
			enable_irq(dwc->irq_gadget);
		}
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(dwc3_runtime_resume);

int dwc3_runtime_idle(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc3_runtime_checks(dwc))
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_autosuspend(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(dwc3_runtime_idle);

static int dwc3_plat_runtime_suspend(struct device *dev)
{
	return dwc3_runtime_suspend(dev_get_drvdata(dev));
}

static int dwc3_plat_runtime_resume(struct device *dev)
{
	return dwc3_runtime_resume(dev_get_drvdata(dev));
}

static int dwc3_plat_runtime_idle(struct device *dev)
{
	return dwc3_runtime_idle(dev_get_drvdata(dev));
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
int dwc3_pm_suspend(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;

	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
	if (ret)
		return ret;

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(dwc3_pm_suspend);

int dwc3_pm_resume(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret = 0;

	pinctrl_pm_select_default_state(dev);

	pm_runtime_disable(dev);
	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;

	ret = dwc3_resume_common(dwc, PMSG_RESUME);
	if (ret)
		pm_runtime_set_suspended(dev);

out:
	pm_runtime_enable(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dwc3_pm_resume);

void dwc3_pm_complete(struct dwc3 *dwc)
{
	u32 reg;

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
	    dwc->dis_split_quirk) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_SPLITDISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}
}
EXPORT_SYMBOL_GPL(dwc3_pm_complete);

int dwc3_pm_prepare(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;

	/*
	 * Indicate to the PM core that it may safely leave the device in
	 * runtime suspend if it is already runtime-suspended in device mode.
	 */
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE &&
	    pm_runtime_suspended(dev) &&
	    !dev_pinctrl(dev))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(dwc3_pm_prepare);

static int dwc3_plat_suspend(struct device *dev)
{
	return dwc3_pm_suspend(dev_get_drvdata(dev));
}

static int dwc3_plat_resume(struct device *dev)
{
	return dwc3_pm_resume(dev_get_drvdata(dev));
}

static void dwc3_plat_complete(struct device *dev)
{
	dwc3_pm_complete(dev_get_drvdata(dev));
}

static int dwc3_plat_prepare(struct device *dev)
{
	return dwc3_pm_prepare(dev_get_drvdata(dev));
}
#else
#define dwc3_plat_complete NULL
#define dwc3_plat_prepare NULL
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dwc3_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_plat_suspend, dwc3_plat_resume)
	.complete = dwc3_plat_complete,
	.prepare = dwc3_plat_prepare,
	/*
	 * Runtime suspend halts the controller on disconnection. It relies on
	 * platforms with custom connection notification to start the controller
	 * again.
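	 *
	 * A glue or platform layer reacting to such a notification (a VBUS
	 * or ID interrupt, for instance) would typically just bump the
	 * controller's runtime PM count to restart it, along the lines of
	 * (sketch only):
	 *
	 *	pm_runtime_get_sync(dwc->dev);
	 *	...
	 *	pm_runtime_put_autosuspend(dwc->dev);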
	 */
	SET_RUNTIME_PM_OPS(dwc3_plat_runtime_suspend, dwc3_plat_runtime_resume,
			   dwc3_plat_runtime_idle)
};

#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
	{
		.compatible = "snps,dwc3"
	},
	{
		.compatible = "synopsys,dwc3"
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#endif

#ifdef CONFIG_ACPI

#define ACPI_ID_INTEL_BSW	"808622B7"

static const struct acpi_device_id dwc3_acpi_match[] = {
	{ ACPI_ID_INTEL_BSW, 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
#endif

static struct platform_driver dwc3_driver = {
	.probe		= dwc3_probe,
	.remove		= dwc3_remove,
	.driver		= {
		.name	= "dwc3",
		.of_match_table	= of_match_ptr(of_dwc3_match),
		.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
		.pm	= &dwc3_dev_pm_ops,
	},
};

module_platform_driver(dwc3_driver);

MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
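
/*
 * A minimal sketch of how a glue driver could reuse the exported core entry
 * points above (the "acme" names are hypothetical and for illustration only;
 * error handling is trimmed):
 *
 *	static int dwc3_acme_probe(struct platform_device *pdev)
 *	{
 *		struct dwc3_probe_data probe_data = {};
 *		struct dwc3 *dwc;
 *
 *		dwc = devm_kzalloc(&pdev->dev, sizeof(*dwc), GFP_KERNEL);
 *		if (!dwc)
 *			return -ENOMEM;
 *
 *		dwc->dev = &pdev->dev;
 *		probe_data.dwc = dwc;
 *		probe_data.res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		probe_data.properties = DWC3_DEFAULT_PROPERTIES;
 *
 *		return dwc3_core_probe(&probe_data);
 *	}
 */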