Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'musb-for-v3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-next

usb: musb: patches for v3.7 merge window

Here we have a bunch of miscellaneous cleanups and fixes
to the musb driver. It fixes a bunch of mistakes and errors
which nobody has triggered before, so I'm not Ccing stable
tree.

We are finally improving OMAP's VBUS/ID Mailbox usage so
that we can introduce our PHY drivers properly. Also, we're
adding support for multiple instances of the MUSB IP in
the same SoC, as seen on some platforms from TI which
have 2 MUSB instances.

Other than that, we have some small fixes like not kicking
DMA for a zero byte transfer, or properly handling NAK timeout
on MUSB's host side, and the enabling of DMA Mode1 for any
transfers which are aligned to wMaxPacketSize.

All patches have been pending on the mailing list for a long time
and I don't expect any big surprises with this pull request.

+923 -658
+14
Documentation/devicetree/bindings/usb/am33xx-usb.txt
··· 1 + AM33XX MUSB GLUE 2 + - compatible : Should be "ti,musb-am33xx" 3 + - ti,hwmods : must be "usb_otg_hs" 4 + - multipoint : Should be "1" indicating the musb controller supports 5 + multipoint. This is a MUSB configuration-specific setting. 6 + - num_eps : Specifies the number of endpoints. This is also a 7 + MUSB configuration-specific setting. Should be set to "16" 8 + - ram_bits : Specifies the ram address size. Should be set to "12" 9 + - port0_mode : Should be "3" to represent OTG. "1" signifies HOST and "2" 10 + represents PERIPHERAL. 11 + - port1_mode : Should be "1" to represent HOST. "3" signifies OTG and "2" 12 + represents PERIPHERAL. 13 + - power : Should be "250". This signifies the controller can supply upto 14 + 500mA when operating in host mode.
+33
Documentation/devicetree/bindings/usb/omap-usb.txt
··· 1 + OMAP GLUE 2 + 3 + OMAP MUSB GLUE 4 + - compatible : Should be "ti,omap4-musb" or "ti,omap3-musb" 5 + - ti,hwmods : must be "usb_otg_hs" 6 + - multipoint : Should be "1" indicating the musb controller supports 7 + multipoint. This is a MUSB configuration-specific setting. 8 + - num_eps : Specifies the number of endpoints. This is also a 9 + MUSB configuration-specific setting. Should be set to "16" 10 + - ram_bits : Specifies the ram address size. Should be set to "12" 11 + - interface_type : This is a board specific setting to describe the type of 12 + interface between the controller and the phy. It should be "0" or "1" 13 + specifying ULPI and UTMI respectively. 14 + - mode : Should be "3" to represent OTG. "1" signifies HOST and "2" 15 + represents PERIPHERAL. 16 + - power : Should be "50". This signifies the controller can supply upto 17 + 100mA when operating in host mode. 18 + 19 + SOC specific device node entry 20 + usb_otg_hs: usb_otg_hs@4a0ab000 { 21 + compatible = "ti,omap4-musb"; 22 + ti,hwmods = "usb_otg_hs"; 23 + multipoint = <1>; 24 + num_eps = <16>; 25 + ram_bits = <12>; 26 + }; 27 + 28 + Board specific device node entry 29 + &usb_otg_hs { 30 + interface_type = <1>; 31 + mode = <3>; 32 + power = <50>; 33 + };
+6
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
··· 5890 5890 .pa_end = 0x4a0ab003, 5891 5891 .flags = ADDR_TYPE_RT 5892 5892 }, 5893 + { 5894 + /* XXX: Remove this once control module driver is in place */ 5895 + .pa_start = 0x4a00233c, 5896 + .pa_end = 0x4a00233f, 5897 + .flags = ADDR_TYPE_RT 5898 + }, 5893 5899 { } 5894 5900 }; 5895 5901
+1 -1
drivers/usb/musb/Kconfig
··· 19 19 it's being used with, including the USB peripheral role, 20 20 or the USB host role, or both. 21 21 22 - Texas Instruments familiies using this IP include DaVinci 22 + Texas Instruments families using this IP include DaVinci 23 23 (35x, 644x ...), OMAP 243x, OMAP 3, and TUSB 6010. 24 24 25 25 Analog Devices parts using this IP include Blackfin BF54x,
+35 -31
drivers/usb/musb/am35x.c
··· 108 108 musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK); 109 109 110 110 /* Force the DRVVBUS IRQ so we can start polling for ID change. */ 111 - if (is_otg_enabled(musb)) 112 - musb_writel(reg_base, CORE_INTR_SRC_SET_REG, 113 - AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT); 111 + musb_writel(reg_base, CORE_INTR_SRC_SET_REG, 112 + AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT); 114 113 } 115 114 116 115 /* ··· 173 174 MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT); 174 175 break; 175 176 case OTG_STATE_B_IDLE: 176 - if (!is_peripheral_enabled(musb)) 177 - break; 178 - 179 177 devctl = musb_readb(mregs, MUSB_DEVCTL); 180 178 if (devctl & MUSB_DEVCTL_BDEVICE) 181 179 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); ··· 188 192 static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout) 189 193 { 190 194 static unsigned long last_timer; 191 - 192 - if (!is_otg_enabled(musb)) 193 - return; 194 195 195 196 if (timeout == 0) 196 197 timeout = jiffies + msecs_to_jiffies(3); ··· 265 272 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 266 273 int err; 267 274 268 - err = is_host_enabled(musb) && (musb->int_usb & 269 - MUSB_INTR_VBUSERROR); 275 + err = musb->int_usb & MUSB_INTR_VBUSERROR; 270 276 if (err) { 271 277 /* 272 278 * The Mentor core doesn't debounce VBUS as needed ··· 282 290 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 283 291 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 284 292 WARNING("VBUS error workaround (delay coming)\n"); 285 - } else if (is_host_enabled(musb) && drvvbus) { 293 + } else if (drvvbus) { 286 294 MUSB_HST_MODE(musb); 287 295 otg->default_a = 1; 288 296 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; ··· 319 327 } 320 328 321 329 /* Poll for ID change */ 322 - if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) 330 + if (musb->xceiv->state == OTG_STATE_B_IDLE) 323 331 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 324 332 325 333 spin_unlock_irqrestore(&musb->lock, flags); ··· 
362 370 if (IS_ERR_OR_NULL(musb->xceiv)) 363 371 return -ENODEV; 364 372 365 - if (is_host_enabled(musb)) 366 - setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); 373 + setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); 367 374 368 375 /* Reset the musb */ 369 376 if (data->reset) ··· 392 401 struct musb_hdrc_platform_data *plat = dev->platform_data; 393 402 struct omap_musb_board_data *data = plat->board_data; 394 403 395 - if (is_host_enabled(musb)) 396 - del_timer_sync(&otg_workaround); 404 + del_timer_sync(&otg_workaround); 397 405 398 406 /* Shutdown the on-chip PHY and its PLL. */ 399 407 if (data->set_phy_power) ··· 459 469 struct clk *clk; 460 470 461 471 int ret = -ENOMEM; 472 + int musbid; 462 473 463 474 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 464 475 if (!glue) { ··· 467 476 goto err0; 468 477 } 469 478 470 - musb = platform_device_alloc("musb-hdrc", -1); 479 + /* get the musb id */ 480 + musbid = musb_get_id(&pdev->dev, GFP_KERNEL); 481 + if (musbid < 0) { 482 + dev_err(&pdev->dev, "failed to allocate musb id\n"); 483 + ret = -ENOMEM; 484 + goto err1; 485 + } 486 + 487 + musb = platform_device_alloc("musb-hdrc", musbid); 471 488 if (!musb) { 472 489 dev_err(&pdev->dev, "failed to allocate musb device\n"); 473 - goto err1; 490 + goto err2; 474 491 } 475 492 476 493 phy_clk = clk_get(&pdev->dev, "fck"); 477 494 if (IS_ERR(phy_clk)) { 478 495 dev_err(&pdev->dev, "failed to get PHY clock\n"); 479 496 ret = PTR_ERR(phy_clk); 480 - goto err2; 497 + goto err3; 481 498 } 482 499 483 500 clk = clk_get(&pdev->dev, "ick"); 484 501 if (IS_ERR(clk)) { 485 502 dev_err(&pdev->dev, "failed to get clock\n"); 486 503 ret = PTR_ERR(clk); 487 - goto err3; 504 + goto err4; 488 505 } 489 506 490 507 ret = clk_enable(phy_clk); 491 508 if (ret) { 492 509 dev_err(&pdev->dev, "failed to enable PHY clock\n"); 493 - goto err4; 510 + goto err5; 494 511 } 495 512 496 513 ret = clk_enable(clk); 497 514 if (ret) { 498 515 dev_err(&pdev->dev, "failed to 
enable clock\n"); 499 - goto err5; 516 + goto err6; 500 517 } 501 518 519 + musb->id = musbid; 502 520 musb->dev.parent = &pdev->dev; 503 521 musb->dev.dma_mask = &am35x_dmamask; 504 522 musb->dev.coherent_dma_mask = am35x_dmamask; ··· 525 525 pdev->num_resources); 526 526 if (ret) { 527 527 dev_err(&pdev->dev, "failed to add resources\n"); 528 - goto err6; 528 + goto err7; 529 529 } 530 530 531 531 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 532 532 if (ret) { 533 533 dev_err(&pdev->dev, "failed to add platform_data\n"); 534 - goto err6; 534 + goto err7; 535 535 } 536 536 537 537 ret = platform_device_add(musb); 538 538 if (ret) { 539 539 dev_err(&pdev->dev, "failed to register musb device\n"); 540 - goto err6; 540 + goto err7; 541 541 } 542 542 543 543 return 0; 544 544 545 - err6: 545 + err7: 546 546 clk_disable(clk); 547 547 548 - err5: 548 + err6: 549 549 clk_disable(phy_clk); 550 550 551 - err4: 551 + err5: 552 552 clk_put(clk); 553 553 554 - err3: 554 + err4: 555 555 clk_put(phy_clk); 556 556 557 - err2: 557 + err3: 558 558 platform_device_put(musb); 559 + 560 + err2: 561 + musb_put_id(&pdev->dev, musbid); 559 562 560 563 err1: 561 564 kfree(glue); ··· 571 568 { 572 569 struct am35x_glue *glue = platform_get_drvdata(pdev); 573 570 571 + musb_put_id(&pdev->dev, glue->musb->id); 574 572 platform_device_del(glue->musb); 575 573 platform_device_put(glue->musb); 576 574 clk_disable(glue->clk);
+31 -34
drivers/usb/musb/blackfin.c
··· 185 185 } 186 186 187 187 /* Start sampling ID pin, when plug is removed from MUSB */ 188 - if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE 189 - || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) || 188 + if ((musb->xceiv->state == OTG_STATE_B_IDLE 189 + || musb->xceiv->state == OTG_STATE_A_WAIT_BCON) || 190 190 (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) { 191 191 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 192 192 musb->a_wait_bcon = TIMER_DELAY; ··· 229 229 230 230 val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR; 231 231 musb_writeb(musb->mregs, MUSB_INTRUSB, val); 232 - if (is_otg_enabled(musb)) 233 - musb->xceiv->state = OTG_STATE_B_IDLE; 234 - else 235 - musb_writeb(musb->mregs, MUSB_POWER, MUSB_POWER_HSENAB); 232 + musb->xceiv->state = OTG_STATE_B_IDLE; 236 233 } 237 234 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 238 235 break; 239 236 case OTG_STATE_B_IDLE: 240 - 241 - if (!is_peripheral_enabled(musb)) 242 - break; 243 - /* Start a new session. It seems that MUSB needs taking 237 + /* 238 + * Start a new session. It seems that MUSB needs taking 244 239 * some time to recognize the type of the plug inserted? 245 240 */ 246 241 val = musb_readw(musb->mregs, MUSB_DEVCTL); ··· 291 296 292 297 static void bfin_musb_enable(struct musb *musb) 293 298 { 294 - if (!is_otg_enabled(musb) && is_host_enabled(musb)) { 295 - mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 296 - musb->a_wait_bcon = TIMER_DELAY; 297 - } 299 + /* REVISIT is this really correct ? 
*/ 298 300 } 299 301 300 302 static void bfin_musb_disable(struct musb *musb) ··· 314 322 static int bfin_musb_set_power(struct usb_phy *x, unsigned mA) 315 323 { 316 324 return 0; 317 - } 318 - 319 - static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout) 320 - { 321 - if (!is_otg_enabled(musb) && is_host_enabled(musb)) 322 - mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 323 325 } 324 326 325 327 static int bfin_musb_vbus_status(struct musb *musb) ··· 411 425 412 426 bfin_musb_reg_init(musb); 413 427 414 - if (is_host_enabled(musb)) { 415 - setup_timer(&musb_conn_timer, 416 - musb_conn_timer_handler, (unsigned long) musb); 417 - } 418 - if (is_peripheral_enabled(musb)) 419 - musb->xceiv->set_power = bfin_musb_set_power; 428 + setup_timer(&musb_conn_timer, musb_conn_timer_handler, 429 + (unsigned long) musb); 430 + 431 + musb->xceiv->set_power = bfin_musb_set_power; 420 432 421 433 musb->isr = blackfin_interrupt; 422 434 musb->double_buffer_not_ok = true; ··· 439 455 .disable = bfin_musb_disable, 440 456 441 457 .set_mode = bfin_musb_set_mode, 442 - .try_idle = bfin_musb_try_idle, 443 458 444 459 .vbus_status = bfin_musb_vbus_status, 445 460 .set_vbus = bfin_musb_set_vbus, ··· 455 472 struct bfin_glue *glue; 456 473 457 474 int ret = -ENOMEM; 475 + int musbid; 458 476 459 477 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 460 478 if (!glue) { ··· 463 479 goto err0; 464 480 } 465 481 466 - musb = platform_device_alloc("musb-hdrc", -1); 467 - if (!musb) { 468 - dev_err(&pdev->dev, "failed to allocate musb device\n"); 482 + /* get the musb id */ 483 + musbid = musb_get_id(&pdev->dev, GFP_KERNEL); 484 + if (musbid < 0) { 485 + dev_err(&pdev->dev, "failed to allocate musb id\n"); 486 + ret = -ENOMEM; 469 487 goto err1; 470 488 } 471 489 490 + musb = platform_device_alloc("musb-hdrc", musbid); 491 + if (!musb) { 492 + dev_err(&pdev->dev, "failed to allocate musb device\n"); 493 + goto err2; 494 + } 495 + 496 + musb->id = musbid; 472 497 
musb->dev.parent = &pdev->dev; 473 498 musb->dev.dma_mask = &bfin_dmamask; 474 499 musb->dev.coherent_dma_mask = bfin_dmamask; ··· 493 500 pdev->num_resources); 494 501 if (ret) { 495 502 dev_err(&pdev->dev, "failed to add resources\n"); 496 - goto err2; 503 + goto err3; 497 504 } 498 505 499 506 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 500 507 if (ret) { 501 508 dev_err(&pdev->dev, "failed to add platform_data\n"); 502 - goto err2; 509 + goto err3; 503 510 } 504 511 505 512 ret = platform_device_add(musb); 506 513 if (ret) { 507 514 dev_err(&pdev->dev, "failed to register musb device\n"); 508 - goto err2; 515 + goto err3; 509 516 } 510 517 511 518 return 0; 512 519 513 - err2: 520 + err3: 514 521 platform_device_put(musb); 522 + 523 + err2: 524 + musb_put_id(&pdev->dev, musbid); 515 525 516 526 err1: 517 527 kfree(glue); ··· 527 531 { 528 532 struct bfin_glue *glue = platform_get_drvdata(pdev); 529 533 534 + musb_put_id(&pdev->dev, glue->musb->id); 530 535 platform_device_del(glue->musb); 531 536 platform_device_put(glue->musb); 532 537 kfree(glue);
+1 -1
drivers/usb/musb/cppi_dma.c
··· 1316 1316 } 1317 1317 1318 1318 /* Instantiate a software object representing a DMA controller. */ 1319 - struct dma_controller *__init 1319 + struct dma_controller *__devinit 1320 1320 dma_controller_create(struct musb *musb, void __iomem *mregs) 1321 1321 { 1322 1322 struct cppi *controller;
+31 -27
drivers/usb/musb/da8xx.c
··· 156 156 musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask); 157 157 158 158 /* Force the DRVVBUS IRQ so we can start polling for ID change. */ 159 - if (is_otg_enabled(musb)) 160 - musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG, 161 - DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT); 159 + musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG, 160 + DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT); 162 161 } 163 162 164 163 /** ··· 231 232 MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT); 232 233 break; 233 234 case OTG_STATE_B_IDLE: 234 - if (!is_peripheral_enabled(musb)) 235 - break; 236 - 237 235 /* 238 236 * There's no ID-changed IRQ, so we have no good way to tell 239 237 * when to switch to the A-Default state machine (by setting ··· 259 263 static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout) 260 264 { 261 265 static unsigned long last_timer; 262 - 263 - if (!is_otg_enabled(musb)) 264 - return; 265 266 266 267 if (timeout == 0) 267 268 timeout = jiffies + msecs_to_jiffies(3); ··· 327 334 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 328 335 int err; 329 336 330 - err = is_host_enabled(musb) && (musb->int_usb & 331 - MUSB_INTR_VBUSERROR); 337 + err = musb->int_usb & USB_INTR_VBUSERROR; 332 338 if (err) { 333 339 /* 334 340 * The Mentor core doesn't debounce VBUS as needed ··· 344 352 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 345 353 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 346 354 WARNING("VBUS error workaround (delay coming)\n"); 347 - } else if (is_host_enabled(musb) && drvvbus) { 355 + } else if (drvvbus) { 348 356 MUSB_HST_MODE(musb); 349 357 otg->default_a = 1; 350 358 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; ··· 375 383 musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); 376 384 377 385 /* Poll for ID change */ 378 - if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) 386 + if (musb->xceiv->state == OTG_STATE_B_IDLE) 379 387 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 380 388 381 389 
spin_unlock_irqrestore(&musb->lock, flags); ··· 423 431 if (IS_ERR_OR_NULL(musb->xceiv)) 424 432 goto fail; 425 433 426 - if (is_host_enabled(musb)) 427 - setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); 434 + setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); 428 435 429 436 /* Reset the controller */ 430 437 musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK); ··· 446 455 447 456 static int da8xx_musb_exit(struct musb *musb) 448 457 { 449 - if (is_host_enabled(musb)) 450 - del_timer_sync(&otg_workaround); 458 + del_timer_sync(&otg_workaround); 451 459 452 460 phy_off(); 453 461 ··· 480 490 struct clk *clk; 481 491 482 492 int ret = -ENOMEM; 493 + int musbid; 483 494 484 495 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 485 496 if (!glue) { ··· 488 497 goto err0; 489 498 } 490 499 491 - musb = platform_device_alloc("musb-hdrc", -1); 500 + /* get the musb id */ 501 + musbid = musb_get_id(&pdev->dev, GFP_KERNEL); 502 + if (musbid < 0) { 503 + dev_err(&pdev->dev, "failed to allocate musb id\n"); 504 + ret = -ENOMEM; 505 + goto err1; 506 + } 507 + 508 + musb = platform_device_alloc("musb-hdrc", musbid); 492 509 if (!musb) { 493 510 dev_err(&pdev->dev, "failed to allocate musb device\n"); 494 - goto err1; 511 + goto err2; 495 512 } 496 513 497 514 clk = clk_get(&pdev->dev, "usb20"); 498 515 if (IS_ERR(clk)) { 499 516 dev_err(&pdev->dev, "failed to get clock\n"); 500 517 ret = PTR_ERR(clk); 501 - goto err2; 518 + goto err3; 502 519 } 503 520 504 521 ret = clk_enable(clk); 505 522 if (ret) { 506 523 dev_err(&pdev->dev, "failed to enable clock\n"); 507 - goto err3; 524 + goto err4; 508 525 } 509 526 527 + musb->id = musbid; 510 528 musb->dev.parent = &pdev->dev; 511 529 musb->dev.dma_mask = &da8xx_dmamask; 512 530 musb->dev.coherent_dma_mask = da8xx_dmamask; ··· 532 532 pdev->num_resources); 533 533 if (ret) { 534 534 dev_err(&pdev->dev, "failed to add resources\n"); 535 - goto err4; 535 + goto err5; 536 536 } 537 537 538 538 ret = 
platform_device_add_data(musb, pdata, sizeof(*pdata)); 539 539 if (ret) { 540 540 dev_err(&pdev->dev, "failed to add platform_data\n"); 541 - goto err4; 541 + goto err5; 542 542 } 543 543 544 544 ret = platform_device_add(musb); 545 545 if (ret) { 546 546 dev_err(&pdev->dev, "failed to register musb device\n"); 547 - goto err4; 547 + goto err5; 548 548 } 549 549 550 550 return 0; 551 551 552 - err4: 552 + err5: 553 553 clk_disable(clk); 554 554 555 - err3: 555 + err4: 556 556 clk_put(clk); 557 557 558 - err2: 558 + err3: 559 559 platform_device_put(musb); 560 + 561 + err2: 562 + musb_put_id(&pdev->dev, musbid); 560 563 561 564 err1: 562 565 kfree(glue); ··· 572 569 { 573 570 struct da8xx_glue *glue = platform_get_drvdata(pdev); 574 571 572 + musb_put_id(&pdev->dev, glue->musb->id); 575 573 platform_device_del(glue->musb); 576 574 platform_device_put(glue->musb); 577 575 clk_disable(glue->clk);
+34 -32
drivers/usb/musb/davinci.c
··· 116 116 dma_off = 0; 117 117 118 118 /* force a DRVVBUS irq so we can start polling for ID change */ 119 - if (is_otg_enabled(musb)) 120 - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, 119 + musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, 121 120 DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); 122 121 } 123 122 ··· 234 235 MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); 235 236 break; 236 237 case OTG_STATE_B_IDLE: 237 - if (!is_peripheral_enabled(musb)) 238 - break; 239 - 240 - /* There's no ID-changed IRQ, so we have no good way to tell 238 + /* 239 + * There's no ID-changed IRQ, so we have no good way to tell 241 240 * when to switch to the A-Default state machine (by setting 242 241 * the DEVCTL.SESSION flag). 243 242 * ··· 313 316 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 314 317 int err = musb->int_usb & MUSB_INTR_VBUSERROR; 315 318 316 - err = is_host_enabled(musb) 317 - && (musb->int_usb & MUSB_INTR_VBUSERROR); 319 + err = musb->int_usb & MUSB_INTR_VBUSERROR; 318 320 if (err) { 319 321 /* The Mentor core doesn't debounce VBUS as needed 320 322 * to cope with device connect current spikes. 
This ··· 329 333 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 330 334 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 331 335 WARNING("VBUS error workaround (delay coming)\n"); 332 - } else if (is_host_enabled(musb) && drvvbus) { 336 + } else if (drvvbus) { 333 337 MUSB_HST_MODE(musb); 334 338 otg->default_a = 1; 335 339 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; ··· 362 366 musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); 363 367 364 368 /* poll for ID change */ 365 - if (is_otg_enabled(musb) 366 - && musb->xceiv->state == OTG_STATE_B_IDLE) 369 + if (musb->xceiv->state == OTG_STATE_B_IDLE) 367 370 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 368 371 369 372 spin_unlock_irqrestore(&musb->lock, flags); ··· 393 398 if (revision == 0) 394 399 goto fail; 395 400 396 - if (is_host_enabled(musb)) 397 - setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); 401 + setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); 398 402 399 403 davinci_musb_source_power(musb, 0, 1); 400 404 ··· 414 420 if (cpu_is_davinci_dm355()) { 415 421 u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); 416 422 417 - if (is_host_enabled(musb)) { 418 - deepsleep &= ~DRVVBUS_OVERRIDE; 419 - } else { 420 - deepsleep &= ~DRVVBUS_FORCE; 421 - deepsleep |= DRVVBUS_OVERRIDE; 422 - } 423 + deepsleep &= ~DRVVBUS_FORCE; 423 424 __raw_writel(deepsleep, DM355_DEEPSLEEP); 424 425 } 425 426 ··· 443 454 444 455 static int davinci_musb_exit(struct musb *musb) 445 456 { 446 - if (is_host_enabled(musb)) 447 - del_timer_sync(&otg_workaround); 457 + del_timer_sync(&otg_workaround); 448 458 449 459 /* force VBUS off */ 450 460 if (cpu_is_davinci_dm355()) { ··· 457 469 davinci_musb_source_power(musb, 0 /*off*/, 1); 458 470 459 471 /* delay, to avoid problems with module reload */ 460 - if (is_host_enabled(musb) && musb->xceiv->otg->default_a) { 472 + if (musb->xceiv->otg->default_a) { 461 473 int maxdelay = 30; 462 474 u8 devctl, warn = 0; 463 475 ··· 512 524 struct clk *clk; 513 525 514 
526 int ret = -ENOMEM; 527 + int musbid; 515 528 516 529 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 517 530 if (!glue) { ··· 520 531 goto err0; 521 532 } 522 533 523 - musb = platform_device_alloc("musb-hdrc", -1); 534 + /* get the musb id */ 535 + musbid = musb_get_id(&pdev->dev, GFP_KERNEL); 536 + if (musbid < 0) { 537 + dev_err(&pdev->dev, "failed to allocate musb id\n"); 538 + ret = -ENOMEM; 539 + goto err1; 540 + } 541 + 542 + musb = platform_device_alloc("musb-hdrc", musbid); 524 543 if (!musb) { 525 544 dev_err(&pdev->dev, "failed to allocate musb device\n"); 526 - goto err1; 545 + goto err2; 527 546 } 528 547 529 548 clk = clk_get(&pdev->dev, "usb"); 530 549 if (IS_ERR(clk)) { 531 550 dev_err(&pdev->dev, "failed to get clock\n"); 532 551 ret = PTR_ERR(clk); 533 - goto err2; 552 + goto err3; 534 553 } 535 554 536 555 ret = clk_enable(clk); 537 556 if (ret) { 538 557 dev_err(&pdev->dev, "failed to enable clock\n"); 539 - goto err3; 558 + goto err4; 540 559 } 541 560 561 + musb->id = musbid; 542 562 musb->dev.parent = &pdev->dev; 543 563 musb->dev.dma_mask = &davinci_dmamask; 544 564 musb->dev.coherent_dma_mask = davinci_dmamask; ··· 564 566 pdev->num_resources); 565 567 if (ret) { 566 568 dev_err(&pdev->dev, "failed to add resources\n"); 567 - goto err4; 569 + goto err5; 568 570 } 569 571 570 572 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 571 573 if (ret) { 572 574 dev_err(&pdev->dev, "failed to add platform_data\n"); 573 - goto err4; 575 + goto err5; 574 576 } 575 577 576 578 ret = platform_device_add(musb); 577 579 if (ret) { 578 580 dev_err(&pdev->dev, "failed to register musb device\n"); 579 - goto err4; 581 + goto err5; 580 582 } 581 583 582 584 return 0; 583 585 584 - err4: 586 + err5: 585 587 clk_disable(clk); 586 588 587 - err3: 589 + err4: 588 590 clk_put(clk); 589 591 590 - err2: 592 + err3: 591 593 platform_device_put(musb); 594 + 595 + err2: 596 + musb_put_id(&pdev->dev, musbid); 592 597 593 598 err1: 594 599 kfree(glue); ··· 604 
603 { 605 604 struct davinci_glue *glue = platform_get_drvdata(pdev); 606 605 606 + musb_put_id(&pdev->dev, glue->musb->id); 607 607 platform_device_del(glue->musb); 608 608 platform_device_put(glue->musb); 609 609 clk_disable(glue->clk);
+137 -188
drivers/usb/musb/musb_core.c
··· 99 99 #include <linux/prefetch.h> 100 100 #include <linux/platform_device.h> 101 101 #include <linux/io.h> 102 + #include <linux/idr.h> 103 + #include <linux/dma-mapping.h> 102 104 103 105 #include "musb_core.h" 104 106 ··· 116 114 117 115 #define MUSB_DRIVER_NAME "musb-hdrc" 118 116 const char musb_driver_name[] = MUSB_DRIVER_NAME; 117 + static DEFINE_IDA(musb_ida); 119 118 120 119 MODULE_DESCRIPTION(DRIVER_INFO); 121 120 MODULE_AUTHOR(DRIVER_AUTHOR); ··· 132 129 } 133 130 134 131 /*-------------------------------------------------------------------------*/ 132 + 133 + int musb_get_id(struct device *dev, gfp_t gfp_mask) 134 + { 135 + int ret; 136 + int id; 137 + 138 + ret = ida_pre_get(&musb_ida, gfp_mask); 139 + if (!ret) { 140 + dev_err(dev, "failed to reserve resource for id\n"); 141 + return -ENOMEM; 142 + } 143 + 144 + ret = ida_get_new(&musb_ida, &id); 145 + if (ret < 0) { 146 + dev_err(dev, "failed to allocate a new id\n"); 147 + return ret; 148 + } 149 + 150 + return id; 151 + } 152 + EXPORT_SYMBOL_GPL(musb_get_id); 153 + 154 + void musb_put_id(struct device *dev, int id) 155 + { 156 + 157 + dev_dbg(dev, "removing id %d\n", id); 158 + ida_remove(&musb_ida, id); 159 + } 160 + EXPORT_SYMBOL_GPL(musb_put_id); 135 161 136 162 #ifndef CONFIG_BLACKFIN 137 163 static int musb_ulpi_read(struct usb_phy *phy, u32 offset) ··· 266 234 struct musb *musb = hw_ep->musb; 267 235 void __iomem *fifo = hw_ep->fifo; 268 236 237 + if (unlikely(len == 0)) 238 + return; 239 + 269 240 prefetch((u8 *)src); 270 241 271 242 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", ··· 310 275 { 311 276 struct musb *musb = hw_ep->musb; 312 277 void __iomem *fifo = hw_ep->fifo; 278 + 279 + if (unlikely(len == 0)) 280 + return; 313 281 314 282 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", 315 283 'R', hw_ep->epnum, fifo, len, dst); ··· 386 348 /* 387 349 * Handles OTG hnp timeouts, such as b_ase0_brst 388 350 */ 389 - void musb_otg_timer_func(unsigned long 
data) 351 + static void musb_otg_timer_func(unsigned long data) 390 352 { 391 353 struct musb *musb = (struct musb *)data; 392 354 unsigned long flags; ··· 681 643 break; 682 644 case OTG_STATE_B_PERIPHERAL: 683 645 musb_g_suspend(musb); 684 - musb->is_active = is_otg_enabled(musb) 685 - && otg->gadget->b_hnp_enable; 646 + musb->is_active = otg->gadget->b_hnp_enable; 686 647 if (musb->is_active) { 687 648 musb->xceiv->state = OTG_STATE_B_WAIT_ACON; 688 649 dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n"); ··· 697 660 break; 698 661 case OTG_STATE_A_HOST: 699 662 musb->xceiv->state = OTG_STATE_A_SUSPEND; 700 - musb->is_active = is_otg_enabled(musb) 701 - && otg->host->b_hnp_enable; 663 + musb->is_active = otg->host->b_hnp_enable; 702 664 break; 703 665 case OTG_STATE_B_HOST: 704 666 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ ··· 785 749 case OTG_STATE_A_SUSPEND: 786 750 usb_hcd_resume_root_hub(musb_to_hcd(musb)); 787 751 musb_root_disconnect(musb); 788 - if (musb->a_wait_bcon != 0 && is_otg_enabled(musb)) 752 + if (musb->a_wait_bcon != 0) 789 753 musb_platform_try_idle(musb, jiffies 790 754 + msecs_to_jiffies(musb->a_wait_bcon)); 791 755 break; ··· 823 787 */ 824 788 if (int_usb & MUSB_INTR_RESET) { 825 789 handled = IRQ_HANDLED; 826 - if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { 790 + if ((devctl & MUSB_DEVCTL_HM) != 0) { 827 791 /* 828 792 * Looks like non-HS BABBLE can be ignored, but 829 793 * HS BABBLE is an error condition. 
For HS the solution ··· 837 801 ERR("Stopping host session -- babble\n"); 838 802 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 839 803 } 840 - } else if (is_peripheral_capable()) { 804 + } else { 841 805 dev_dbg(musb->controller, "BUS RESET as %s\n", 842 806 otg_state_string(musb->xceiv->state)); 843 807 switch (musb->xceiv->state) { ··· 961 925 devctl = musb_readb(regs, MUSB_DEVCTL); 962 926 devctl &= ~MUSB_DEVCTL_SESSION; 963 927 964 - if (is_otg_enabled(musb)) { 965 - /* session started after: 966 - * (a) ID-grounded irq, host mode; 967 - * (b) vbus present/connect IRQ, peripheral mode; 968 - * (c) peripheral initiates, using SRP 969 - */ 970 - if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 971 - musb->is_active = 1; 972 - else 973 - devctl |= MUSB_DEVCTL_SESSION; 974 - 975 - } else if (is_host_enabled(musb)) { 976 - /* assume ID pin is hard-wired to ground */ 928 + /* session started after: 929 + * (a) ID-grounded irq, host mode; 930 + * (b) vbus present/connect IRQ, peripheral mode; 931 + * (c) peripheral initiates, using SRP 932 + */ 933 + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 934 + musb->is_active = 1; 935 + else 977 936 devctl |= MUSB_DEVCTL_SESSION; 978 937 979 - } else /* peripheral is enabled */ { 980 - if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 981 - musb->is_active = 1; 982 - } 983 938 musb_platform_enable(musb); 984 939 musb_writeb(regs, MUSB_DEVCTL, devctl); 985 940 } ··· 1034 1007 musb_generic_disable(musb); 1035 1008 spin_unlock_irqrestore(&musb->lock, flags); 1036 1009 1037 - if (!is_otg_enabled(musb) && is_host_enabled(musb)) 1038 - usb_remove_hcd(musb_to_hcd(musb)); 1039 1010 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 1040 1011 musb_platform_exit(musb); 1041 1012 ··· 1327 1302 if (offset < 0) { 1328 1303 pr_debug("%s: mem overrun, ep %d\n", 1329 1304 musb_driver_name, epn); 1330 - return -EINVAL; 1305 + return offset; 1331 1306 } 1332 1307 epn++; 1333 1308 musb->nr_endpoints = max(epn, musb->nr_endpoints); ··· 1355 
1330 { 1356 1331 u8 epnum = 0; 1357 1332 struct musb_hw_ep *hw_ep; 1358 - void *mbase = musb->mregs; 1333 + void __iomem *mbase = musb->mregs; 1359 1334 int ret = 0; 1360 1335 1361 1336 dev_dbg(musb->controller, "<== static silicon ep config\n"); ··· 1596 1571 /* musb_ep_select(musb->mregs, ep_num); */ 1597 1572 /* REVISIT just retval = ep->rx_irq(...) */ 1598 1573 retval = IRQ_HANDLED; 1599 - if (devctl & MUSB_DEVCTL_HM) { 1600 - if (is_host_capable()) 1601 - musb_host_rx(musb, ep_num); 1602 - } else { 1603 - if (is_peripheral_capable()) 1604 - musb_g_rx(musb, ep_num); 1605 - } 1574 + if (devctl & MUSB_DEVCTL_HM) 1575 + musb_host_rx(musb, ep_num); 1576 + else 1577 + musb_g_rx(musb, ep_num); 1606 1578 } 1607 1579 1608 1580 reg >>= 1; ··· 1614 1592 /* musb_ep_select(musb->mregs, ep_num); */ 1615 1593 /* REVISIT just retval |= ep->tx_irq(...) */ 1616 1594 retval = IRQ_HANDLED; 1617 - if (devctl & MUSB_DEVCTL_HM) { 1618 - if (is_host_capable()) 1619 - musb_host_tx(musb, ep_num); 1620 - } else { 1621 - if (is_peripheral_capable()) 1622 - musb_g_tx(musb, ep_num); 1623 - } 1595 + if (devctl & MUSB_DEVCTL_HM) 1596 + musb_host_tx(musb, ep_num); 1597 + else 1598 + musb_g_tx(musb, ep_num); 1624 1599 } 1625 1600 reg >>= 1; 1626 1601 ep_num++; ··· 1653 1634 } else { 1654 1635 /* endpoints 1..15 */ 1655 1636 if (transmit) { 1656 - if (devctl & MUSB_DEVCTL_HM) { 1657 - if (is_host_capable()) 1658 - musb_host_tx(musb, epnum); 1659 - } else { 1660 - if (is_peripheral_capable()) 1661 - musb_g_tx(musb, epnum); 1662 - } 1637 + if (devctl & MUSB_DEVCTL_HM) 1638 + musb_host_tx(musb, epnum); 1639 + else 1640 + musb_g_tx(musb, epnum); 1663 1641 } else { 1664 1642 /* receive */ 1665 - if (devctl & MUSB_DEVCTL_HM) { 1666 - if (is_host_capable()) 1667 - musb_host_rx(musb, epnum); 1668 - } else { 1669 - if (is_peripheral_capable()) 1670 - musb_g_rx(musb, epnum); 1671 - } 1643 + if (devctl & MUSB_DEVCTL_HM) 1644 + musb_host_rx(musb, epnum); 1645 + else 1646 + musb_g_rx(musb, epnum); 1672 1647 
} 1673 1648 } 1674 1649 } ··· 1798 1785 static void musb_irq_work(struct work_struct *data) 1799 1786 { 1800 1787 struct musb *musb = container_of(data, struct musb, irq_work); 1801 - static int old_state; 1802 1788 1803 - if (musb->xceiv->state != old_state) { 1804 - old_state = musb->xceiv->state; 1789 + if (musb->xceiv->state != musb->xceiv_old_state) { 1790 + musb->xceiv_old_state = musb->xceiv->state; 1805 1791 sysfs_notify(&musb->controller->kobj, NULL, "mode"); 1806 1792 } 1807 1793 } ··· 1874 1862 dma_controller_destroy(c); 1875 1863 } 1876 1864 1877 - kfree(musb); 1865 + usb_put_hcd(musb_to_hcd(musb)); 1878 1866 } 1879 1867 1880 1868 /* 1881 1869 * Perform generic per-controller initialization. 1882 1870 * 1883 - * @pDevice: the controller (already clocked, etc) 1884 - * @nIrq: irq 1885 - * @mregs: virtual address of controller registers, 1871 + * @dev: the controller (already clocked, etc) 1872 + * @nIrq: IRQ number 1873 + * @ctrl: virtual address of controller registers, 1886 1874 * not yet corrected for platform-specific offsets 1887 1875 */ 1888 1876 static int __devinit ··· 1891 1879 int status; 1892 1880 struct musb *musb; 1893 1881 struct musb_hdrc_platform_data *plat = dev->platform_data; 1882 + struct usb_hcd *hcd; 1894 1883 1895 1884 /* The driver might handle more features than the board; OK. 1896 1885 * Fail when the board needs a feature that's not enabled. 
··· 1914 1901 pm_runtime_enable(musb->controller); 1915 1902 1916 1903 spin_lock_init(&musb->lock); 1917 - musb->board_mode = plat->mode; 1918 1904 musb->board_set_power = plat->set_power; 1919 1905 musb->min_power = plat->min_power; 1920 1906 musb->ops = plat->platform_ops; ··· 1984 1972 goto fail3; 1985 1973 } 1986 1974 musb->nIrq = nIrq; 1987 - /* FIXME this handles wakeup irqs wrong */ 1975 + /* FIXME this handles wakeup irqs wrong */ 1988 1976 if (enable_irq_wake(nIrq) == 0) { 1989 1977 musb->irq_wake = 1; 1990 1978 device_init_wakeup(dev, 1); ··· 1993 1981 } 1994 1982 1995 1983 /* host side needs more setup */ 1996 - if (is_host_enabled(musb)) { 1997 - struct usb_hcd *hcd = musb_to_hcd(musb); 1984 + hcd = musb_to_hcd(musb); 1985 + otg_set_host(musb->xceiv->otg, &hcd->self); 1986 + hcd->self.otg_port = 1; 1987 + musb->xceiv->otg->host = &hcd->self; 1988 + hcd->power_budget = 2 * (plat->power ? : 250); 1998 1989 1999 - otg_set_host(musb->xceiv->otg, &hcd->self); 2000 - 2001 - if (is_otg_enabled(musb)) 2002 - hcd->self.otg_port = 1; 2003 - musb->xceiv->otg->host = &hcd->self; 2004 - hcd->power_budget = 2 * (plat->power ? : 250); 2005 - 2006 - /* program PHY to use external vBus if required */ 2007 - if (plat->extvbus) { 2008 - u8 busctl = musb_read_ulpi_buscontrol(musb->mregs); 2009 - busctl |= MUSB_ULPI_USE_EXTVBUS; 2010 - musb_write_ulpi_buscontrol(musb->mregs, busctl); 2011 - } 1990 + /* program PHY to use external vBus if required */ 1991 + if (plat->extvbus) { 1992 + u8 busctl = musb_read_ulpi_buscontrol(musb->mregs); 1993 + busctl |= MUSB_ULPI_USE_EXTVBUS; 1994 + musb_write_ulpi_buscontrol(musb->mregs, busctl); 2012 1995 } 2013 1996 2014 - /* For the host-only role, we can activate right away. 2015 - * (We expect the ID pin to be forcibly grounded!!) 2016 - * Otherwise, wait till the gadget driver hooks up. 
2017 - */ 2018 - if (!is_otg_enabled(musb) && is_host_enabled(musb)) { 2019 - struct usb_hcd *hcd = musb_to_hcd(musb); 1997 + MUSB_DEV_MODE(musb); 1998 + musb->xceiv->otg->default_a = 0; 1999 + musb->xceiv->state = OTG_STATE_B_IDLE; 2020 2000 2021 - MUSB_HST_MODE(musb); 2022 - musb->xceiv->otg->default_a = 1; 2023 - musb->xceiv->state = OTG_STATE_A_IDLE; 2001 + status = musb_gadget_setup(musb); 2024 2002 2025 - status = usb_add_hcd(musb_to_hcd(musb), 0, 0); 2026 - 2027 - hcd->self.uses_pio_for_control = 1; 2028 - dev_dbg(musb->controller, "%s mode, status %d, devctl %02x %c\n", 2029 - "HOST", status, 2030 - musb_readb(musb->mregs, MUSB_DEVCTL), 2031 - (musb_readb(musb->mregs, MUSB_DEVCTL) 2032 - & MUSB_DEVCTL_BDEVICE 2033 - ? 'B' : 'A')); 2034 - 2035 - } else /* peripheral is enabled */ { 2036 - MUSB_DEV_MODE(musb); 2037 - musb->xceiv->otg->default_a = 0; 2038 - musb->xceiv->state = OTG_STATE_B_IDLE; 2039 - 2040 - status = musb_gadget_setup(musb); 2041 - 2042 - dev_dbg(musb->controller, "%s mode, status %d, dev%02x\n", 2043 - is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", 2044 - status, 2045 - musb_readb(musb->mregs, MUSB_DEVCTL)); 2046 - 2047 - } 2048 2003 if (status < 0) 2049 2004 goto fail3; 2050 2005 ··· 2027 2048 2028 2049 pm_runtime_put(musb->controller); 2029 2050 2030 - dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n", 2031 - ({char *s; 2032 - switch (musb->board_mode) { 2033 - case MUSB_HOST: s = "Host"; break; 2034 - case MUSB_PERIPHERAL: s = "Peripheral"; break; 2035 - default: s = "OTG"; break; 2036 - }; s; }), 2037 - ctrl, 2038 - (is_dma_capable() && musb->dma_controller) 2039 - ? 
"DMA" : "PIO", 2040 - musb->nIrq); 2041 - 2042 2051 return 0; 2043 2052 2044 2053 fail5: 2045 2054 musb_exit_debugfs(musb); 2046 2055 2047 2056 fail4: 2048 - if (!is_otg_enabled(musb) && is_host_enabled(musb)) 2049 - usb_remove_hcd(musb_to_hcd(musb)); 2050 - else 2051 - musb_gadget_cleanup(musb); 2057 + musb_gadget_cleanup(musb); 2052 2058 2053 2059 fail3: 2054 2060 pm_runtime_put_sync(musb->controller); ··· 2060 2096 /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just 2061 2097 * bridge to a platform device; this driver then suffices. 2062 2098 */ 2063 - 2064 - #ifndef CONFIG_MUSB_PIO_ONLY 2065 - static u64 *orig_dma_mask; 2066 - #endif 2067 - 2068 2099 static int __devinit musb_probe(struct platform_device *pdev) 2069 2100 { 2070 2101 struct device *dev = &pdev->dev; ··· 2078 2119 return -ENOMEM; 2079 2120 } 2080 2121 2081 - #ifndef CONFIG_MUSB_PIO_ONLY 2082 - /* clobbered by use_dma=n */ 2083 - orig_dma_mask = dev->dma_mask; 2084 - #endif 2085 2122 status = musb_init_controller(dev, irq, base); 2086 2123 if (status < 0) 2087 2124 iounmap(base); ··· 2087 2132 2088 2133 static int __devexit musb_remove(struct platform_device *pdev) 2089 2134 { 2090 - struct musb *musb = dev_to_musb(&pdev->dev); 2135 + struct device *dev = &pdev->dev; 2136 + struct musb *musb = dev_to_musb(dev); 2091 2137 void __iomem *ctrl_base = musb->ctrl_base; 2092 2138 2093 2139 /* this gets called on rmmod. 
··· 2101 2145 2102 2146 musb_free(musb); 2103 2147 iounmap(ctrl_base); 2104 - device_init_wakeup(&pdev->dev, 0); 2148 + device_init_wakeup(dev, 0); 2105 2149 #ifndef CONFIG_MUSB_PIO_ONLY 2106 - pdev->dev.dma_mask = orig_dma_mask; 2150 + dma_set_mask(dev, *dev->parent->dma_mask); 2107 2151 #endif 2108 2152 return 0; 2109 2153 } ··· 2116 2160 void __iomem *musb_base = musb->mregs; 2117 2161 void __iomem *epio; 2118 2162 2119 - if (is_host_enabled(musb)) { 2120 - musb->context.frame = musb_readw(musb_base, MUSB_FRAME); 2121 - musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE); 2122 - musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs); 2123 - } 2163 + musb->context.frame = musb_readw(musb_base, MUSB_FRAME); 2164 + musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE); 2165 + musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs); 2124 2166 musb->context.power = musb_readb(musb_base, MUSB_POWER); 2125 2167 musb->context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); 2126 2168 musb->context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE); ··· 2157 2203 musb->context.index_regs[i].rxfifosz = 2158 2204 musb_read_rxfifosz(musb_base); 2159 2205 } 2160 - if (is_host_enabled(musb)) { 2161 - musb->context.index_regs[i].txtype = 2162 - musb_readb(epio, MUSB_TXTYPE); 2163 - musb->context.index_regs[i].txinterval = 2164 - musb_readb(epio, MUSB_TXINTERVAL); 2165 - musb->context.index_regs[i].rxtype = 2166 - musb_readb(epio, MUSB_RXTYPE); 2167 - musb->context.index_regs[i].rxinterval = 2168 - musb_readb(epio, MUSB_RXINTERVAL); 2169 2206 2170 - musb->context.index_regs[i].txfunaddr = 2171 - musb_read_txfunaddr(musb_base, i); 2172 - musb->context.index_regs[i].txhubaddr = 2173 - musb_read_txhubaddr(musb_base, i); 2174 - musb->context.index_regs[i].txhubport = 2175 - musb_read_txhubport(musb_base, i); 2207 + musb->context.index_regs[i].txtype = 2208 + musb_readb(epio, MUSB_TXTYPE); 2209 + musb->context.index_regs[i].txinterval = 2210 + 
musb_readb(epio, MUSB_TXINTERVAL); 2211 + musb->context.index_regs[i].rxtype = 2212 + musb_readb(epio, MUSB_RXTYPE); 2213 + musb->context.index_regs[i].rxinterval = 2214 + musb_readb(epio, MUSB_RXINTERVAL); 2176 2215 2177 - musb->context.index_regs[i].rxfunaddr = 2178 - musb_read_rxfunaddr(musb_base, i); 2179 - musb->context.index_regs[i].rxhubaddr = 2180 - musb_read_rxhubaddr(musb_base, i); 2181 - musb->context.index_regs[i].rxhubport = 2182 - musb_read_rxhubport(musb_base, i); 2183 - } 2216 + musb->context.index_regs[i].txfunaddr = 2217 + musb_read_txfunaddr(musb_base, i); 2218 + musb->context.index_regs[i].txhubaddr = 2219 + musb_read_txhubaddr(musb_base, i); 2220 + musb->context.index_regs[i].txhubport = 2221 + musb_read_txhubport(musb_base, i); 2222 + 2223 + musb->context.index_regs[i].rxfunaddr = 2224 + musb_read_rxfunaddr(musb_base, i); 2225 + musb->context.index_regs[i].rxhubaddr = 2226 + musb_read_rxhubaddr(musb_base, i); 2227 + musb->context.index_regs[i].rxhubport = 2228 + musb_read_rxhubport(musb_base, i); 2184 2229 } 2185 2230 } 2186 2231 ··· 2190 2237 void __iomem *ep_target_regs; 2191 2238 void __iomem *epio; 2192 2239 2193 - if (is_host_enabled(musb)) { 2194 - musb_writew(musb_base, MUSB_FRAME, musb->context.frame); 2195 - musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); 2196 - musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl); 2197 - } 2240 + musb_writew(musb_base, MUSB_FRAME, musb->context.frame); 2241 + musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); 2242 + musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl); 2198 2243 musb_writeb(musb_base, MUSB_POWER, musb->context.power); 2199 2244 musb_writew(musb_base, MUSB_INTRTXE, musb->context.intrtxe); 2200 2245 musb_writew(musb_base, MUSB_INTRRXE, musb->context.intrrxe); ··· 2231 2280 musb->context.index_regs[i].rxfifoadd); 2232 2281 } 2233 2282 2234 - if (is_host_enabled(musb)) { 2235 - musb_writeb(epio, MUSB_TXTYPE, 2283 + musb_writeb(epio, 
MUSB_TXTYPE, 2236 2284 musb->context.index_regs[i].txtype); 2237 - musb_writeb(epio, MUSB_TXINTERVAL, 2285 + musb_writeb(epio, MUSB_TXINTERVAL, 2238 2286 musb->context.index_regs[i].txinterval); 2239 - musb_writeb(epio, MUSB_RXTYPE, 2287 + musb_writeb(epio, MUSB_RXTYPE, 2240 2288 musb->context.index_regs[i].rxtype); 2241 - musb_writeb(epio, MUSB_RXINTERVAL, 2289 + musb_writeb(epio, MUSB_RXINTERVAL, 2242 2290 2243 - musb->context.index_regs[i].rxinterval); 2244 - musb_write_txfunaddr(musb_base, i, 2291 + musb->context.index_regs[i].rxinterval); 2292 + musb_write_txfunaddr(musb_base, i, 2245 2293 musb->context.index_regs[i].txfunaddr); 2246 - musb_write_txhubaddr(musb_base, i, 2294 + musb_write_txhubaddr(musb_base, i, 2247 2295 musb->context.index_regs[i].txhubaddr); 2248 - musb_write_txhubport(musb_base, i, 2296 + musb_write_txhubport(musb_base, i, 2249 2297 musb->context.index_regs[i].txhubport); 2250 2298 2251 - ep_target_regs = 2252 - musb_read_target_reg_base(i, musb_base); 2299 + ep_target_regs = 2300 + musb_read_target_reg_base(i, musb_base); 2253 2301 2254 - musb_write_rxfunaddr(ep_target_regs, 2302 + musb_write_rxfunaddr(ep_target_regs, 2255 2303 musb->context.index_regs[i].rxfunaddr); 2256 - musb_write_rxhubaddr(ep_target_regs, 2304 + musb_write_rxhubaddr(ep_target_regs, 2257 2305 musb->context.index_regs[i].rxhubaddr); 2258 - musb_write_rxhubport(ep_target_regs, 2306 + musb_write_rxhubport(ep_target_regs, 2259 2307 musb->context.index_regs[i].rxhubport); 2260 - } 2261 2308 } 2262 2309 musb_writeb(musb_base, MUSB_INDEX, musb->context.index); 2263 2310 }
+7 -10
drivers/usb/musb/musb_core.h
··· 71 71 #include <linux/usb/hcd.h> 72 72 #include "musb_host.h" 73 73 74 - #define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST) 75 - #define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL) 76 - #define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG) 77 - 78 74 /* NOTE: otg and peripheral-only state machines start at B_IDLE. 79 75 * OTG or host-only go to A_IDLE when ID is sensed. 80 76 */ ··· 84 88 85 89 /****************************** PERIPHERAL ROLE *****************************/ 86 90 87 - #define is_peripheral_capable() (1) 88 - 89 91 extern irqreturn_t musb_g_ep0_irq(struct musb *); 90 92 extern void musb_g_tx(struct musb *, u8); 91 93 extern void musb_g_rx(struct musb *, u8); ··· 94 100 extern void musb_g_disconnect(struct musb *); 95 101 96 102 /****************************** HOST ROLE ***********************************/ 97 - 98 - #define is_host_capable() (1) 99 103 100 104 extern irqreturn_t musb_h_ep0_irq(struct musb *); 101 105 extern void musb_host_tx(struct musb *, u8); ··· 368 376 u16 epmask; 369 377 u8 nr_endpoints; 370 378 371 - u8 board_mode; /* enum musb_mode */ 372 379 int (*board_set_power)(int state); 373 380 374 381 u8 min_power; /* vbus for periph, in mA/2 */ ··· 437 446 #ifdef MUSB_CONFIG_PROC_FS 438 447 struct proc_dir_entry *proc_entry; 439 448 #endif 449 + int xceiv_old_state; 450 + #ifdef CONFIG_DEBUG_FS 451 + struct dentry *debugfs_root; 452 + #endif 440 453 }; 441 454 442 455 static inline struct musb *gadget_to_musb(struct usb_gadget *g) ··· 479 484 static inline int musb_read_fifosize(struct musb *musb, 480 485 struct musb_hw_ep *hw_ep, u8 epnum) 481 486 { 482 - void *mbase = musb->mregs; 487 + void __iomem *mbase = musb->mregs; 483 488 u8 reg = 0; 484 489 485 490 /* read from core using indexed model */ ··· 521 526 522 527 extern void musb_start(struct musb *musb); 523 528 extern void musb_stop(struct musb *musb); 529 + extern int musb_get_id(struct device *dev, gfp_t gfp_mask); 530 + extern 
void musb_put_id(struct device *dev, int id); 524 531 525 532 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); 526 533 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+3 -5
drivers/usb/musb/musb_debugfs.c
··· 103 103 { } /* Terminating Entry */ 104 104 }; 105 105 106 - static struct dentry *musb_debugfs_root; 107 - 108 106 static int musb_regdump_show(struct seq_file *s, void *unused) 109 107 { 110 108 struct musb *musb = s->private; ··· 239 241 struct dentry *file; 240 242 int ret; 241 243 242 - root = debugfs_create_dir("musb", NULL); 244 + root = debugfs_create_dir(dev_name(musb->controller), NULL); 243 245 if (!root) { 244 246 ret = -ENOMEM; 245 247 goto err0; ··· 259 261 goto err1; 260 262 } 261 263 262 - musb_debugfs_root = root; 264 + musb->debugfs_root = root; 263 265 264 266 return 0; 265 267 ··· 272 274 273 275 void /* __init_or_exit */ musb_exit_debugfs(struct musb *musb) 274 276 { 275 - debugfs_remove_recursive(musb_debugfs_root); 277 + debugfs_remove_recursive(musb->debugfs_root); 276 278 }
+1 -1
drivers/usb/musb/musb_dma.h
··· 178 178 extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit); 179 179 180 180 181 - extern struct dma_controller *__init 181 + extern struct dma_controller *__devinit 182 182 dma_controller_create(struct musb *, void __iomem *); 183 183 184 184 extern void dma_controller_destroy(struct dma_controller *);
+126 -71
drivers/usb/musb/musb_dsps.c
··· 31 31 32 32 #include <linux/init.h> 33 33 #include <linux/io.h> 34 + #include <linux/of.h> 34 35 #include <linux/err.h> 35 36 #include <linux/platform_device.h> 36 37 #include <linux/dma-mapping.h> ··· 46 45 #include <plat/usb.h> 47 46 48 47 #include "musb_core.h" 48 + 49 + #ifdef CONFIG_OF 50 + static const struct of_device_id musb_dsps_of_match[]; 51 + #endif 49 52 50 53 /** 51 54 * avoid using musb_readx()/musb_writex() as glue layer should not be ··· 111 106 /* miscellaneous stuff */ 112 107 u32 musb_core_offset; 113 108 u8 poll_seconds; 109 + /* number of musb instances */ 110 + u8 instances; 114 111 }; 115 112 116 113 /** ··· 120 113 */ 121 114 struct dsps_glue { 122 115 struct device *dev; 123 - struct platform_device *musb; /* child musb pdev */ 116 + struct platform_device *musb[2]; /* child musb pdev */ 124 117 const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */ 125 - struct timer_list timer; /* otg_workaround timer */ 118 + struct timer_list timer[2]; /* otg_workaround timer */ 119 + unsigned long last_timer[2]; /* last timer data for each instance */ 126 120 }; 127 121 128 122 /** ··· 146 138 dsps_writel(reg_base, wrp->epintr_set, epmask); 147 139 dsps_writel(reg_base, wrp->coreintr_set, coremask); 148 140 /* Force the DRVVBUS IRQ so we can start polling for ID change. 
*/ 149 - if (is_otg_enabled(musb)) 150 - dsps_writel(reg_base, wrp->coreintr_set, 151 - (1 << wrp->drvvbus) << wrp->usb_shift); 141 + dsps_writel(reg_base, wrp->coreintr_set, 142 + (1 << wrp->drvvbus) << wrp->usb_shift); 152 143 } 153 144 154 145 /** ··· 173 166 struct musb *musb = (void *)_musb; 174 167 void __iomem *mregs = musb->mregs; 175 168 struct device *dev = musb->controller; 176 - struct platform_device *pdev = to_platform_device(dev->parent); 177 - struct dsps_glue *glue = platform_get_drvdata(pdev); 169 + struct platform_device *pdev = to_platform_device(dev); 170 + struct dsps_glue *glue = dev_get_drvdata(dev->parent); 178 171 const struct dsps_musb_wrapper *wrp = glue->wrp; 179 172 u8 devctl; 180 173 unsigned long flags; ··· 208 201 MUSB_INTR_VBUSERROR << wrp->usb_shift); 209 202 break; 210 203 case OTG_STATE_B_IDLE: 211 - if (!is_peripheral_enabled(musb)) 212 - break; 213 - 214 204 devctl = dsps_readb(mregs, MUSB_DEVCTL); 215 205 if (devctl & MUSB_DEVCTL_BDEVICE) 216 - mod_timer(&glue->timer, 206 + mod_timer(&glue->timer[pdev->id], 217 207 jiffies + wrp->poll_seconds * HZ); 218 208 else 219 209 musb->xceiv->state = OTG_STATE_A_IDLE; ··· 224 220 static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout) 225 221 { 226 222 struct device *dev = musb->controller; 227 - struct platform_device *pdev = to_platform_device(dev->parent); 228 - struct dsps_glue *glue = platform_get_drvdata(pdev); 229 - static unsigned long last_timer; 230 - 231 - if (!is_otg_enabled(musb)) 232 - return; 223 + struct platform_device *pdev = to_platform_device(dev); 224 + struct dsps_glue *glue = dev_get_drvdata(dev->parent); 233 225 234 226 if (timeout == 0) 235 227 timeout = jiffies + msecs_to_jiffies(3); ··· 235 235 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 236 236 dev_dbg(musb->controller, "%s active, deleting timer\n", 237 237 otg_state_string(musb->xceiv->state)); 238 - del_timer(&glue->timer); 239 - last_timer = jiffies; 238 + 
del_timer(&glue->timer[pdev->id]); 239 + glue->last_timer[pdev->id] = jiffies; 240 240 return; 241 241 } 242 242 243 - if (time_after(last_timer, timeout) && timer_pending(&glue->timer)) { 243 + if (time_after(glue->last_timer[pdev->id], timeout) && 244 + timer_pending(&glue->timer[pdev->id])) { 244 245 dev_dbg(musb->controller, 245 246 "Longer idle timer already pending, ignoring...\n"); 246 247 return; 247 248 } 248 - last_timer = timeout; 249 + glue->last_timer[pdev->id] = timeout; 249 250 250 251 dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", 251 252 otg_state_string(musb->xceiv->state), 252 253 jiffies_to_msecs(timeout - jiffies)); 253 - mod_timer(&glue->timer, timeout); 254 + mod_timer(&glue->timer[pdev->id], timeout); 254 255 } 255 256 256 257 static irqreturn_t dsps_interrupt(int irq, void *hci) ··· 259 258 struct musb *musb = hci; 260 259 void __iomem *reg_base = musb->ctrl_base; 261 260 struct device *dev = musb->controller; 262 - struct platform_device *pdev = to_platform_device(dev->parent); 263 - struct dsps_glue *glue = platform_get_drvdata(pdev); 261 + struct platform_device *pdev = to_platform_device(dev); 262 + struct dsps_glue *glue = dev_get_drvdata(dev->parent); 264 263 const struct dsps_musb_wrapper *wrp = glue->wrp; 265 264 unsigned long flags; 266 265 irqreturn_t ret = IRQ_NONE; ··· 295 294 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. 296 295 * Also, DRVVBUS pulses for SRP (but not at 5V) ... 
297 296 */ 298 - if ((usbintr & MUSB_INTR_BABBLE) && is_host_enabled(musb)) 297 + if (usbintr & MUSB_INTR_BABBLE) 299 298 pr_info("CAUTION: musb: Babble Interrupt Occured\n"); 300 299 301 300 if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) { ··· 304 303 u8 devctl = dsps_readb(mregs, MUSB_DEVCTL); 305 304 int err; 306 305 307 - err = is_host_enabled(musb) && (musb->int_usb & 308 - MUSB_INTR_VBUSERROR); 306 + err = musb->int_usb & MUSB_INTR_VBUSERROR; 309 307 if (err) { 310 308 /* 311 309 * The Mentor core doesn't debounce VBUS as needed ··· 319 319 */ 320 320 musb->int_usb &= ~MUSB_INTR_VBUSERROR; 321 321 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 322 - mod_timer(&glue->timer, 322 + mod_timer(&glue->timer[pdev->id], 323 323 jiffies + wrp->poll_seconds * HZ); 324 324 WARNING("VBUS error workaround (delay coming)\n"); 325 - } else if (is_host_enabled(musb) && drvvbus) { 325 + } else if (drvvbus) { 326 326 musb->is_active = 1; 327 327 MUSB_HST_MODE(musb); 328 328 musb->xceiv->otg->default_a = 1; 329 329 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 330 - del_timer(&glue->timer); 330 + del_timer(&glue->timer[pdev->id]); 331 331 } else { 332 332 musb->is_active = 0; 333 333 MUSB_DEV_MODE(musb); ··· 353 353 dsps_writel(reg_base, wrp->eoi, 1); 354 354 355 355 /* Poll for ID change */ 356 - if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) 357 - mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ); 356 + if (musb->xceiv->state == OTG_STATE_B_IDLE) 357 + mod_timer(&glue->timer[pdev->id], 358 + jiffies + wrp->poll_seconds * HZ); 358 359 359 360 spin_unlock_irqrestore(&musb->lock, flags); 360 361 ··· 366 365 { 367 366 struct device *dev = musb->controller; 368 367 struct musb_hdrc_platform_data *plat = dev->platform_data; 369 - struct platform_device *pdev = to_platform_device(dev->parent); 370 - struct dsps_glue *glue = platform_get_drvdata(pdev); 368 + struct platform_device *pdev = to_platform_device(dev); 369 + struct dsps_glue *glue = 
dev_get_drvdata(dev->parent); 371 370 const struct dsps_musb_wrapper *wrp = glue->wrp; 372 371 struct omap_musb_board_data *data = plat->board_data; 373 372 void __iomem *reg_base = musb->ctrl_base; ··· 377 376 /* mentor core register starts at offset of 0x400 from musb base */ 378 377 musb->mregs += wrp->musb_core_offset; 379 378 380 - /* NOP driver needs change if supporting dual instance */ 381 - usb_nop_xceiv_register(); 379 + /* Get the NOP PHY */ 382 380 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 383 381 if (IS_ERR_OR_NULL(musb->xceiv)) 384 382 return -ENODEV; ··· 389 389 goto err0; 390 390 } 391 391 392 - if (is_host_enabled(musb)) 393 - setup_timer(&glue->timer, otg_timer, (unsigned long) musb); 392 + setup_timer(&glue->timer[pdev->id], otg_timer, (unsigned long) musb); 394 393 395 394 /* Reset the musb */ 396 395 dsps_writel(reg_base, wrp->control, (1 << wrp->reset)); ··· 420 421 struct device *dev = musb->controller; 421 422 struct musb_hdrc_platform_data *plat = dev->platform_data; 422 423 struct omap_musb_board_data *data = plat->board_data; 423 - struct platform_device *pdev = to_platform_device(dev->parent); 424 - struct dsps_glue *glue = platform_get_drvdata(pdev); 424 + struct platform_device *pdev = to_platform_device(dev); 425 + struct dsps_glue *glue = dev_get_drvdata(dev->parent); 425 426 426 - if (is_host_enabled(musb)) 427 - del_timer_sync(&glue->timer); 427 + del_timer_sync(&glue->timer[pdev->id]); 428 428 429 429 /* Shutdown the on-chip PHY and its PLL. 
*/ 430 430 if (data->set_phy_power) ··· 453 455 struct device *dev = glue->dev; 454 456 struct platform_device *pdev = to_platform_device(dev); 455 457 struct musb_hdrc_platform_data *pdata = dev->platform_data; 458 + struct device_node *np = pdev->dev.of_node; 459 + struct musb_hdrc_config *config; 456 460 struct platform_device *musb; 457 461 struct resource *res; 458 462 struct resource resources[2]; 459 463 char res_name[10]; 460 - int ret; 464 + int ret, musbid; 461 465 462 466 /* get memory resource */ 463 467 sprintf(res_name, "musb%d", id); ··· 484 484 resources[1] = *res; 485 485 resources[1].name = "mc"; 486 486 487 - /* allocate the child platform device */ 488 - musb = platform_device_alloc("musb-hdrc", -1); 489 - if (!musb) { 490 - dev_err(dev, "failed to allocate musb device\n"); 487 + /* get the musb id */ 488 + musbid = musb_get_id(dev, GFP_KERNEL); 489 + if (musbid < 0) { 490 + dev_err(dev, "failed to allocate musb id\n"); 491 491 ret = -ENOMEM; 492 492 goto err0; 493 493 } 494 + /* allocate the child platform device */ 495 + musb = platform_device_alloc("musb-hdrc", musbid); 496 + if (!musb) { 497 + dev_err(dev, "failed to allocate musb device\n"); 498 + ret = -ENOMEM; 499 + goto err1; 500 + } 494 501 502 + musb->id = musbid; 495 503 musb->dev.parent = dev; 496 504 musb->dev.dma_mask = &musb_dmamask; 497 505 musb->dev.coherent_dma_mask = musb_dmamask; 498 506 499 - glue->musb = musb; 500 - 501 - pdata->platform_ops = &dsps_ops; 507 + glue->musb[id] = musb; 502 508 503 509 ret = platform_device_add_resources(musb, resources, 2); 504 510 if (ret) { 505 511 dev_err(dev, "failed to add resources\n"); 506 - goto err1; 512 + goto err2; 507 513 } 514 + 515 + if (np) { 516 + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 517 + if (!pdata) { 518 + dev_err(&pdev->dev, 519 + "failed to allocate musb platfrom data\n"); 520 + ret = -ENOMEM; 521 + goto err2; 522 + } 523 + 524 + config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); 525 + 
if (!config) { 526 + dev_err(&pdev->dev, 527 + "failed to allocate musb hdrc config\n"); 528 + goto err2; 529 + } 530 + 531 + of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); 532 + of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); 533 + sprintf(res_name, "port%d-mode", id); 534 + of_property_read_u32(np, res_name, (u32 *)&pdata->mode); 535 + of_property_read_u32(np, "power", (u32 *)&pdata->power); 536 + config->multipoint = of_property_read_bool(np, "multipoint"); 537 + 538 + pdata->config = config; 539 + } 540 + 541 + pdata->platform_ops = &dsps_ops; 508 542 509 543 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 510 544 if (ret) { 511 545 dev_err(dev, "failed to add platform_data\n"); 512 - goto err1; 546 + goto err2; 513 547 } 514 548 515 549 ret = platform_device_add(musb); 516 550 if (ret) { 517 551 dev_err(dev, "failed to register musb device\n"); 518 - goto err1; 552 + goto err2; 519 553 } 520 554 521 555 return 0; 522 556 523 - err1: 557 + err2: 524 558 platform_device_put(musb); 559 + err1: 560 + musb_put_id(dev, musbid); 525 561 err0: 526 562 return ret; 527 563 } 528 564 529 - static void __devexit dsps_delete_musb_pdev(struct dsps_glue *glue) 565 + static void dsps_delete_musb_pdev(struct dsps_glue *glue, u8 id) 530 566 { 531 - platform_device_del(glue->musb); 532 - platform_device_put(glue->musb); 567 + musb_put_id(glue->dev, glue->musb[id]->id); 568 + platform_device_del(glue->musb[id]); 569 + platform_device_put(glue->musb[id]); 533 570 } 534 571 535 572 static int __devinit dsps_probe(struct platform_device *pdev) 536 573 { 537 - const struct platform_device_id *id = platform_get_device_id(pdev); 538 - const struct dsps_musb_wrapper *wrp = 539 - (struct dsps_musb_wrapper *)id->driver_data; 574 + struct device_node *np = pdev->dev.of_node; 575 + const struct of_device_id *match; 576 + const struct dsps_musb_wrapper *wrp; 540 577 struct dsps_glue *glue; 541 578 struct resource *iomem; 542 - int ret; 579 + int 
ret, i; 580 + 581 + match = of_match_node(musb_dsps_of_match, np); 582 + if (!match) { 583 + dev_err(&pdev->dev, "fail to get matching of_match struct\n"); 584 + ret = -EINVAL; 585 + goto err0; 586 + } 587 + wrp = match->data; 543 588 544 589 /* allocate glue */ 545 590 glue = kzalloc(sizeof(*glue), GFP_KERNEL); ··· 621 576 goto err2; 622 577 } 623 578 624 - /* create the child platform device for first instances of musb */ 625 - ret = dsps_create_musb_pdev(glue, 0); 626 - if (ret != 0) { 627 - dev_err(&pdev->dev, "failed to create child pdev\n"); 628 - goto err3; 579 + /* create the child platform device for all instances of musb */ 580 + for (i = 0; i < wrp->instances ; i++) { 581 + ret = dsps_create_musb_pdev(glue, i); 582 + if (ret != 0) { 583 + dev_err(&pdev->dev, "failed to create child pdev\n"); 584 + /* release resources of previously created instances */ 585 + for (i--; i >= 0 ; i--) 586 + dsps_delete_musb_pdev(glue, i); 587 + goto err3; 588 + } 629 589 } 630 590 631 591 return 0; ··· 648 598 static int __devexit dsps_remove(struct platform_device *pdev) 649 599 { 650 600 struct dsps_glue *glue = platform_get_drvdata(pdev); 601 + const struct dsps_musb_wrapper *wrp = glue->wrp; 602 + int i; 651 603 652 604 /* delete the child platform device */ 653 - dsps_delete_musb_pdev(glue); 605 + for (i = 0; i < wrp->instances ; i++) 606 + dsps_delete_musb_pdev(glue, i); 654 607 655 608 /* disable usbss clocks */ 656 609 pm_runtime_put(&pdev->dev); ··· 719 666 .rxep_bitmap = (0xfffe << 16), 720 667 .musb_core_offset = 0x400, 721 668 .poll_seconds = 2, 669 + .instances = 2, 722 670 }; 723 671 724 672 static const struct platform_device_id musb_dsps_id_table[] __devinitconst = { ··· 731 677 }; 732 678 MODULE_DEVICE_TABLE(platform, musb_dsps_id_table); 733 679 680 + #ifdef CONFIG_OF 734 681 static const struct of_device_id musb_dsps_of_match[] __devinitconst = { 735 - { .compatible = "musb-ti81xx", }, 736 - { .compatible = "ti,ti81xx-musb", }, 737 - { .compatible = 
"ti,am335x-musb", }, 682 + { .compatible = "ti,musb-am33xx", 683 + .data = (void *) &ti81xx_driver_data, }, 738 684 { }, 739 685 }; 740 686 MODULE_DEVICE_TABLE(of, musb_dsps_of_match); 687 + #endif 741 688 742 689 static struct platform_driver dsps_usbss_driver = { 743 690 .probe = dsps_probe, ··· 746 691 .driver = { 747 692 .name = "musb-dsps", 748 693 .pm = &dsps_pm_ops, 749 - .of_match_table = musb_dsps_of_match, 694 + .of_match_table = of_match_ptr(musb_dsps_of_match), 750 695 }, 751 696 .id_table = musb_dsps_id_table, 752 697 };
+64 -112
drivers/usb/musb/musb_gadget.c
··· 373 373 request_size = min_t(size_t, request->length - request->actual, 374 374 musb_ep->dma->max_len); 375 375 376 - use_dma = (request->dma != DMA_ADDR_INVALID); 376 + use_dma = (request->dma != DMA_ADDR_INVALID && request_size); 377 377 378 378 /* MUSB_TXCSR_P_ISO is still set correctly */ 379 379 ··· 644 644 struct usb_request *request = &req->request; 645 645 struct musb_ep *musb_ep; 646 646 void __iomem *epio = musb->endpoints[epnum].regs; 647 - unsigned fifo_count = 0; 648 - u16 len; 647 + unsigned len = 0; 648 + u16 fifo_count; 649 649 u16 csr = musb_readw(epio, MUSB_RXCSR); 650 650 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 651 651 u8 use_mode_1; ··· 655 655 else 656 656 musb_ep = &hw_ep->ep_out; 657 657 658 - len = musb_ep->packet_sz; 658 + fifo_count = musb_ep->packet_sz; 659 659 660 660 /* Check if EP is disabled */ 661 661 if (!musb_ep->desc) { ··· 704 704 } 705 705 706 706 if (csr & MUSB_RXCSR_RXPKTRDY) { 707 - len = musb_readw(epio, MUSB_RXCOUNT); 707 + fifo_count = musb_readw(epio, MUSB_RXCOUNT); 708 708 709 709 /* 710 - * Enable Mode 1 on RX transfers only when short_not_ok flag 711 - * is set. Currently short_not_ok flag is set only from 712 - * file_storage and f_mass_storage drivers 710 + * use mode 1 only if we expect data of at least ep packet_sz 711 + * and have not yet received a short packet 713 712 */ 714 - 715 - if (request->short_not_ok && len == musb_ep->packet_sz) 713 + if ((request->length - request->actual >= musb_ep->packet_sz) && 714 + (fifo_count >= musb_ep->packet_sz)) 716 715 use_mode_1 = 1; 717 716 else 718 717 use_mode_1 = 0; ··· 722 723 struct dma_controller *c; 723 724 struct dma_channel *channel; 724 725 int use_dma = 0; 726 + int transfer_size; 725 727 726 728 c = musb->dma_controller; 727 729 channel = musb_ep->dma; 728 - 729 - /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in 730 - * mode 0 only. So we do not get endpoint interrupts due to DMA 731 - * completion. 
We only get interrupts from DMA controller. 732 - * 733 - * We could operate in DMA mode 1 if we knew the size of the tranfer 734 - * in advance. For mass storage class, request->length = what the host 735 - * sends, so that'd work. But for pretty much everything else, 736 - * request->length is routinely more than what the host sends. For 737 - * most these gadgets, end of is signified either by a short packet, 738 - * or filling the last byte of the buffer. (Sending extra data in 739 - * that last pckate should trigger an overflow fault.) But in mode 1, 740 - * we don't get DMA completion interrupt for short packets. 741 - * 742 - * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), 743 - * to get endpoint interrupt on every DMA req, but that didn't seem 744 - * to work reliably. 745 - * 746 - * REVISIT an updated g_file_storage can set req->short_not_ok, which 747 - * then becomes usable as a runtime "use mode 1" hint... 748 - */ 749 730 750 731 /* Experimental: Mode1 works with mass storage use cases */ 751 732 if (use_mode_1) { ··· 743 764 csr | MUSB_RXCSR_DMAMODE); 744 765 musb_writew(epio, MUSB_RXCSR, csr); 745 766 767 + transfer_size = min(request->length - request->actual, 768 + channel->max_len); 769 + musb_ep->dma->desired_mode = 1; 770 + 746 771 } else { 747 772 if (!musb_ep->hb_mult && 748 773 musb_ep->hw_ep->rx_double_buffered) 749 774 csr |= MUSB_RXCSR_AUTOCLEAR; 750 775 csr |= MUSB_RXCSR_DMAENAB; 751 776 musb_writew(epio, MUSB_RXCSR, csr); 777 + 778 + transfer_size = min(request->length - request->actual, 779 + (unsigned)fifo_count); 780 + musb_ep->dma->desired_mode = 0; 752 781 } 753 782 754 - if (request->actual < request->length) { 755 - int transfer_size = 0; 756 - if (use_mode_1) { 757 - transfer_size = min(request->length - request->actual, 758 - channel->max_len); 759 - musb_ep->dma->desired_mode = 1; 760 - } else { 761 - transfer_size = min(request->length - request->actual, 762 - (unsigned)len); 763 - 
musb_ep->dma->desired_mode = 0; 764 - } 765 - 766 - use_dma = c->channel_program( 767 - channel, 768 - musb_ep->packet_sz, 769 - channel->desired_mode, 770 - request->dma 771 - + request->actual, 772 - transfer_size); 773 - } 783 + use_dma = c->channel_program( 784 + channel, 785 + musb_ep->packet_sz, 786 + channel->desired_mode, 787 + request->dma 788 + + request->actual, 789 + transfer_size); 774 790 775 791 if (use_dma) 776 792 return; ··· 782 808 channel = musb_ep->dma; 783 809 784 810 /* In case first packet is short */ 785 - if (len < musb_ep->packet_sz) 786 - transfer_size = len; 811 + if (fifo_count < musb_ep->packet_sz) 812 + transfer_size = fifo_count; 787 813 else if (request->short_not_ok) 788 814 transfer_size = min(request->length - 789 815 request->actual, ··· 791 817 else 792 818 transfer_size = min(request->length - 793 819 request->actual, 794 - (unsigned)len); 820 + (unsigned)fifo_count); 795 821 796 822 csr &= ~MUSB_RXCSR_DMAMODE; 797 823 csr |= (MUSB_RXCSR_DMAENAB | ··· 819 845 } 820 846 #endif /* Mentor's DMA */ 821 847 822 - fifo_count = request->length - request->actual; 848 + len = request->length - request->actual; 823 849 dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", 824 850 musb_ep->end_point.name, 825 - len, fifo_count, 851 + fifo_count, len, 826 852 musb_ep->packet_sz); 827 853 828 854 fifo_count = min_t(unsigned, len, fifo_count); ··· 875 901 } 876 902 877 903 /* reach the end or short packet detected */ 878 - if (request->actual == request->length || len < musb_ep->packet_sz) 904 + if (request->actual == request->length || 905 + fifo_count < musb_ep->packet_sz) 879 906 musb_g_giveback(musb_ep, request, 0); 880 907 } 881 908 ··· 1860 1885 musb->g.dev.release = musb_gadget_release; 1861 1886 musb->g.name = musb_driver_name; 1862 1887 1863 - if (is_otg_enabled(musb)) 1864 - musb->g.is_otg = 1; 1888 + musb->g.is_otg = 1; 1865 1889 1866 1890 musb_g_init_endpoints(musb); 1867 1891 ··· 1906 1932 { 1907 1933 struct 
musb *musb = gadget_to_musb(g); 1908 1934 struct usb_otg *otg = musb->xceiv->otg; 1935 + struct usb_hcd *hcd = musb_to_hcd(musb); 1909 1936 unsigned long flags; 1910 - int retval = -EINVAL; 1937 + int retval = 0; 1911 1938 1912 - if (driver->max_speed < USB_SPEED_HIGH) 1913 - goto err0; 1939 + if (driver->max_speed < USB_SPEED_HIGH) { 1940 + retval = -EINVAL; 1941 + goto err; 1942 + } 1914 1943 1915 1944 pm_runtime_get_sync(musb->controller); 1916 1945 ··· 1927 1950 1928 1951 otg_set_peripheral(otg, &musb->g); 1929 1952 musb->xceiv->state = OTG_STATE_B_IDLE; 1930 - 1931 - /* 1932 - * FIXME this ignores the softconnect flag. Drivers are 1933 - * allowed hold the peripheral inactive until for example 1934 - * userspace hooks up printer hardware or DSP codecs, so 1935 - * hosts only see fully functional devices. 1936 - */ 1937 - 1938 - if (!is_otg_enabled(musb)) 1939 - musb_start(musb); 1940 - 1941 1953 spin_unlock_irqrestore(&musb->lock, flags); 1942 1954 1943 - if (is_otg_enabled(musb)) { 1944 - struct usb_hcd *hcd = musb_to_hcd(musb); 1945 - 1946 - dev_dbg(musb->controller, "OTG startup...\n"); 1947 - 1948 - /* REVISIT: funcall to other code, which also 1949 - * handles power budgeting ... this way also 1950 - * ensures HdrcStart is indirectly called. 1951 - */ 1952 - retval = usb_add_hcd(musb_to_hcd(musb), 0, 0); 1953 - if (retval < 0) { 1954 - dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); 1955 - goto err2; 1956 - } 1957 - 1958 - if ((musb->xceiv->last_event == USB_EVENT_ID) 1959 - && otg->set_vbus) 1960 - otg_set_vbus(otg, 1); 1961 - 1962 - hcd->self.uses_pio_for_control = 1; 1955 + /* REVISIT: funcall to other code, which also 1956 + * handles power budgeting ... this way also 1957 + * ensures HdrcStart is indirectly called. 
1958 + */ 1959 + retval = usb_add_hcd(hcd, 0, 0); 1960 + if (retval < 0) { 1961 + dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); 1962 + goto err; 1963 1963 } 1964 + 1965 + if ((musb->xceiv->last_event == USB_EVENT_ID) 1966 + && otg->set_vbus) 1967 + otg_set_vbus(otg, 1); 1968 + 1969 + hcd->self.uses_pio_for_control = 1; 1970 + 1964 1971 if (musb->xceiv->last_event == USB_EVENT_NONE) 1965 1972 pm_runtime_put(musb->controller); 1966 1973 1967 1974 return 0; 1968 1975 1969 - err2: 1970 - if (!is_otg_enabled(musb)) 1971 - musb_stop(musb); 1972 - err0: 1976 + err: 1973 1977 return retval; 1974 1978 } 1975 1979 ··· 2028 2070 musb_platform_try_idle(musb, 0); 2029 2071 spin_unlock_irqrestore(&musb->lock, flags); 2030 2072 2031 - if (is_otg_enabled(musb)) { 2032 - usb_remove_hcd(musb_to_hcd(musb)); 2033 - /* FIXME we need to be able to register another 2034 - * gadget driver here and have everything work; 2035 - * that currently misbehaves. 2036 - */ 2037 - } 2038 - 2039 - if (!is_otg_enabled(musb)) 2040 - musb_stop(musb); 2073 + usb_remove_hcd(musb_to_hcd(musb)); 2074 + /* 2075 + * FIXME we need to be able to register another 2076 + * gadget driver here and have everything work; 2077 + * that currently misbehaves. 2078 + */ 2041 2079 2042 2080 pm_runtime_put(musb->controller); 2043 2081 ··· 2195 2241 if (devctl & MUSB_DEVCTL_BDEVICE) { 2196 2242 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 2197 2243 musb->g.is_a_peripheral = 0; 2198 - } else if (is_otg_enabled(musb)) { 2244 + } else { 2199 2245 musb->xceiv->state = OTG_STATE_A_PERIPHERAL; 2200 2246 musb->g.is_a_peripheral = 1; 2201 - } else 2202 - WARN_ON(1); 2247 + } 2203 2248 2204 2249 /* start with default limits on VBUS power draw */ 2205 - (void) musb_gadget_vbus_draw(&musb->g, 2206 - is_otg_enabled(musb) ? 8 : 100); 2250 + (void) musb_gadget_vbus_draw(&musb->g, 8); 2207 2251 }
+196 -68
drivers/usb/musb/musb_host.c
··· 693 693 void __iomem *epio = hw_ep->regs; 694 694 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); 695 695 u16 packet_sz = qh->maxpacket; 696 + u8 use_dma = 1; 697 + u16 csr; 696 698 697 699 dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s " 698 700 "h_addr%02x h_port%02x bytes %d\n", ··· 706 704 707 705 musb_ep_select(mbase, epnum); 708 706 707 + if (is_out && !len) { 708 + use_dma = 0; 709 + csr = musb_readw(epio, MUSB_TXCSR); 710 + csr &= ~MUSB_TXCSR_DMAENAB; 711 + musb_writew(epio, MUSB_TXCSR, csr); 712 + hw_ep->tx_channel = NULL; 713 + } 714 + 709 715 /* candidate for DMA? */ 710 716 dma_controller = musb->dma_controller; 711 - if (is_dma_capable() && epnum && dma_controller) { 717 + if (use_dma && is_dma_capable() && epnum && dma_controller) { 712 718 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; 713 719 if (!dma_channel) { 714 720 dma_channel = dma_controller->channel_alloc( ··· 823 813 if (load_count) { 824 814 /* PIO to load FIFO */ 825 815 qh->segsize = load_count; 826 - musb_write_fifo(hw_ep, load_count, buf); 816 + if (!buf) { 817 + sg_miter_start(&qh->sg_miter, urb->sg, 1, 818 + SG_MITER_ATOMIC 819 + | SG_MITER_FROM_SG); 820 + if (!sg_miter_next(&qh->sg_miter)) { 821 + dev_err(musb->controller, 822 + "error: sg" 823 + "list empty\n"); 824 + sg_miter_stop(&qh->sg_miter); 825 + goto finish; 826 + } 827 + buf = qh->sg_miter.addr + urb->sg->offset + 828 + urb->actual_length; 829 + load_count = min_t(u32, load_count, 830 + qh->sg_miter.length); 831 + musb_write_fifo(hw_ep, load_count, buf); 832 + qh->sg_miter.consumed = load_count; 833 + sg_miter_stop(&qh->sg_miter); 834 + } else 835 + musb_write_fifo(hw_ep, load_count, buf); 827 836 } 828 - 837 + finish: 829 838 /* re-enable interrupt */ 830 839 musb_writew(mbase, MUSB_INTRTXE, int_txe); 831 840 ··· 911 882 } 912 883 } 913 884 885 + /* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to 886 + * the end; avoids starvation for other endpoints. 
887 + */ 888 + static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, 889 + int is_in) 890 + { 891 + struct dma_channel *dma; 892 + struct urb *urb; 893 + void __iomem *mbase = musb->mregs; 894 + void __iomem *epio = ep->regs; 895 + struct musb_qh *cur_qh, *next_qh; 896 + u16 rx_csr, tx_csr; 897 + 898 + musb_ep_select(mbase, ep->epnum); 899 + if (is_in) { 900 + dma = is_dma_capable() ? ep->rx_channel : NULL; 901 + 902 + /* clear nak timeout bit */ 903 + rx_csr = musb_readw(epio, MUSB_RXCSR); 904 + rx_csr |= MUSB_RXCSR_H_WZC_BITS; 905 + rx_csr &= ~MUSB_RXCSR_DATAERROR; 906 + musb_writew(epio, MUSB_RXCSR, rx_csr); 907 + 908 + cur_qh = first_qh(&musb->in_bulk); 909 + } else { 910 + dma = is_dma_capable() ? ep->tx_channel : NULL; 911 + 912 + /* clear nak timeout bit */ 913 + tx_csr = musb_readw(epio, MUSB_TXCSR); 914 + tx_csr |= MUSB_TXCSR_H_WZC_BITS; 915 + tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT; 916 + musb_writew(epio, MUSB_TXCSR, tx_csr); 917 + 918 + cur_qh = first_qh(&musb->out_bulk); 919 + } 920 + if (cur_qh) { 921 + urb = next_urb(cur_qh); 922 + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 923 + dma->status = MUSB_DMA_STATUS_CORE_ABORT; 924 + musb->dma_controller->channel_abort(dma); 925 + urb->actual_length += dma->actual_len; 926 + dma->actual_len = 0L; 927 + } 928 + musb_save_toggle(cur_qh, is_in, urb); 929 + 930 + if (is_in) { 931 + /* move cur_qh to end of queue */ 932 + list_move_tail(&cur_qh->ring, &musb->in_bulk); 933 + 934 + /* get the next qh from musb->in_bulk */ 935 + next_qh = first_qh(&musb->in_bulk); 936 + 937 + /* set rx_reinit and schedule the next qh */ 938 + ep->rx_reinit = 1; 939 + } else { 940 + /* move cur_qh to end of queue */ 941 + list_move_tail(&cur_qh->ring, &musb->out_bulk); 942 + 943 + /* get the next qh from musb->out_bulk */ 944 + next_qh = first_qh(&musb->out_bulk); 945 + 946 + /* set tx_reinit and schedule the next qh */ 947 + ep->tx_reinit = 1; 948 + } 949 + musb_start_urb(musb, is_in, next_qh); 950 + } 
951 + } 914 952 915 953 /* 916 954 * Service the default endpoint (ep0) as host. ··· 1212 1116 void __iomem *mbase = musb->mregs; 1213 1117 struct dma_channel *dma; 1214 1118 bool transfer_pending = false; 1119 + static bool use_sg; 1215 1120 1216 1121 musb_ep_select(mbase, epnum); 1217 1122 tx_csr = musb_readw(epio, MUSB_TXCSR); ··· 1243 1146 status = -ETIMEDOUT; 1244 1147 1245 1148 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { 1246 - dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum); 1247 - 1248 - /* NOTE: this code path would be a good place to PAUSE a 1249 - * transfer, if there's some other (nonperiodic) tx urb 1250 - * that could use this fifo. (dma complicates it...) 1251 - * That's already done for bulk RX transfers. 1252 - * 1253 - * if (bulk && qh->ring.next != &musb->out_bulk), then 1254 - * we have a candidate... NAKing is *NOT* an error 1255 - */ 1256 - musb_ep_select(mbase, epnum); 1257 - musb_writew(epio, MUSB_TXCSR, 1258 - MUSB_TXCSR_H_WZC_BITS 1259 - | MUSB_TXCSR_TXPKTRDY); 1260 - return; 1149 + if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1 1150 + && !list_is_singular(&musb->out_bulk)) { 1151 + dev_dbg(musb->controller, 1152 + "NAK timeout on TX%d ep\n", epnum); 1153 + musb_bulk_nak_timeout(musb, hw_ep, 0); 1154 + } else { 1155 + dev_dbg(musb->controller, 1156 + "TX end=%d device not responding\n", epnum); 1157 + /* NOTE: this code path would be a good place to PAUSE a 1158 + * transfer, if there's some other (nonperiodic) tx urb 1159 + * that could use this fifo. (dma complicates it...) 1160 + * That's already done for bulk RX transfers. 1161 + * 1162 + * if (bulk && qh->ring.next != &musb->out_bulk), then 1163 + * we have a candidate... 
NAKing is *NOT* an error 1164 + */ 1165 + musb_ep_select(mbase, epnum); 1166 + musb_writew(epio, MUSB_TXCSR, 1167 + MUSB_TXCSR_H_WZC_BITS 1168 + | MUSB_TXCSR_TXPKTRDY); 1169 + } 1170 + return; 1261 1171 } 1262 1172 1173 + done: 1263 1174 if (status) { 1264 1175 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1265 1176 dma->status = MUSB_DMA_STATUS_CORE_ABORT; ··· 1437 1332 length = qh->maxpacket; 1438 1333 /* Unmap the buffer so that CPU can use it */ 1439 1334 usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); 1440 - musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); 1335 + 1336 + /* 1337 + * We need to map sg if the transfer_buffer is 1338 + * NULL. 1339 + */ 1340 + if (!urb->transfer_buffer) 1341 + use_sg = true; 1342 + 1343 + if (use_sg) { 1344 + /* sg_miter_start is already done in musb_ep_program */ 1345 + if (!sg_miter_next(&qh->sg_miter)) { 1346 + dev_err(musb->controller, "error: sg list empty\n"); 1347 + sg_miter_stop(&qh->sg_miter); 1348 + status = -EINVAL; 1349 + goto done; 1350 + } 1351 + urb->transfer_buffer = qh->sg_miter.addr; 1352 + length = min_t(u32, length, qh->sg_miter.length); 1353 + musb_write_fifo(hw_ep, length, urb->transfer_buffer); 1354 + qh->sg_miter.consumed = length; 1355 + sg_miter_stop(&qh->sg_miter); 1356 + } else { 1357 + musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); 1358 + } 1359 + 1441 1360 qh->segsize = length; 1361 + 1362 + if (use_sg) { 1363 + if (offset + length >= urb->transfer_buffer_length) 1364 + use_sg = false; 1365 + } 1442 1366 1443 1367 musb_ep_select(mbase, epnum); 1444 1368 musb_writew(epio, MUSB_TXCSR, ··· 1514 1380 1515 1381 #endif 1516 1382 1517 - /* Schedule next QH from musb->in_bulk and move the current qh to 1518 - * the end; avoids starvation for other endpoints. 
1519 - */ 1520 - static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep) 1521 - { 1522 - struct dma_channel *dma; 1523 - struct urb *urb; 1524 - void __iomem *mbase = musb->mregs; 1525 - void __iomem *epio = ep->regs; 1526 - struct musb_qh *cur_qh, *next_qh; 1527 - u16 rx_csr; 1528 - 1529 - musb_ep_select(mbase, ep->epnum); 1530 - dma = is_dma_capable() ? ep->rx_channel : NULL; 1531 - 1532 - /* clear nak timeout bit */ 1533 - rx_csr = musb_readw(epio, MUSB_RXCSR); 1534 - rx_csr |= MUSB_RXCSR_H_WZC_BITS; 1535 - rx_csr &= ~MUSB_RXCSR_DATAERROR; 1536 - musb_writew(epio, MUSB_RXCSR, rx_csr); 1537 - 1538 - cur_qh = first_qh(&musb->in_bulk); 1539 - if (cur_qh) { 1540 - urb = next_urb(cur_qh); 1541 - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1542 - dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1543 - musb->dma_controller->channel_abort(dma); 1544 - urb->actual_length += dma->actual_len; 1545 - dma->actual_len = 0L; 1546 - } 1547 - musb_save_toggle(cur_qh, 1, urb); 1548 - 1549 - /* move cur_qh to end of queue */ 1550 - list_move_tail(&cur_qh->ring, &musb->in_bulk); 1551 - 1552 - /* get the next qh from musb->in_bulk */ 1553 - next_qh = first_qh(&musb->in_bulk); 1554 - 1555 - /* set rx_reinit and schedule the next qh */ 1556 - ep->rx_reinit = 1; 1557 - musb_start_urb(musb, 1, next_qh); 1558 - } 1559 - } 1560 - 1561 1383 /* 1562 1384 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, 1563 1385 * and high-bandwidth IN transfer cases. 
··· 1532 1442 bool done = false; 1533 1443 u32 status; 1534 1444 struct dma_channel *dma; 1445 + static bool use_sg; 1446 + unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; 1535 1447 1536 1448 musb_ep_select(mbase, epnum); 1537 1449 ··· 1592 1500 if (usb_pipebulk(urb->pipe) 1593 1501 && qh->mux == 1 1594 1502 && !list_is_singular(&musb->in_bulk)) { 1595 - musb_bulk_rx_nak_timeout(musb, hw_ep); 1503 + musb_bulk_nak_timeout(musb, hw_ep, 1); 1596 1504 return; 1597 1505 } 1598 1506 musb_ep_select(mbase, epnum); ··· 1848 1756 #endif /* Mentor DMA */ 1849 1757 1850 1758 if (!dma) { 1759 + unsigned int received_len; 1760 + 1851 1761 /* Unmap the buffer so that CPU can use it */ 1852 1762 usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); 1853 - done = musb_host_packet_rx(musb, urb, 1854 - epnum, iso_err); 1763 + 1764 + /* 1765 + * We need to map sg if the transfer_buffer is 1766 + * NULL. 1767 + */ 1768 + if (!urb->transfer_buffer) { 1769 + use_sg = true; 1770 + sg_miter_start(&qh->sg_miter, urb->sg, 1, 1771 + sg_flags); 1772 + } 1773 + 1774 + if (use_sg) { 1775 + if (!sg_miter_next(&qh->sg_miter)) { 1776 + dev_err(musb->controller, "error: sg list empty\n"); 1777 + sg_miter_stop(&qh->sg_miter); 1778 + status = -EINVAL; 1779 + done = true; 1780 + goto finish; 1781 + } 1782 + urb->transfer_buffer = qh->sg_miter.addr; 1783 + received_len = urb->actual_length; 1784 + qh->offset = 0x0; 1785 + done = musb_host_packet_rx(musb, urb, epnum, 1786 + iso_err); 1787 + /* Calculate the number of bytes received */ 1788 + received_len = urb->actual_length - 1789 + received_len; 1790 + qh->sg_miter.consumed = received_len; 1791 + sg_miter_stop(&qh->sg_miter); 1792 + } else { 1793 + done = musb_host_packet_rx(musb, urb, 1794 + epnum, iso_err); 1795 + } 1855 1796 dev_dbg(musb->controller, "read %spacket\n", done ? 
"last " : ""); 1856 1797 } 1857 1798 } ··· 1893 1768 urb->actual_length += xfer_len; 1894 1769 qh->offset += xfer_len; 1895 1770 if (done) { 1771 + if (use_sg) 1772 + use_sg = false; 1773 + 1896 1774 if (urb->status == -EINPROGRESS) 1897 1775 urb->status = status; 1898 1776 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); ··· 1991 1863 else 1992 1864 head = &musb->out_bulk; 1993 1865 1994 - /* Enable bulk RX NAK timeout scheme when bulk requests are 1866 + /* Enable bulk RX/TX NAK timeout scheme when bulk requests are 1995 1867 * multiplexed. This scheme doen't work in high speed to full 1996 1868 * speed scenario as NAK interrupts are not coming from a 1997 1869 * full speed device connected to a high speed device. 1998 1870 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and 1999 1871 * 4 (8 frame or 8ms) for FS device. 2000 1872 */ 2001 - if (is_in && qh->dev) 1873 + if (qh->dev) 2002 1874 qh->intv_reg = 2003 1875 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; 2004 1876 goto success;
+3
drivers/usb/musb/musb_host.h
··· 35 35 #ifndef _MUSB_HOST_H 36 36 #define _MUSB_HOST_H 37 37 38 + #include <linux/scatterlist.h> 39 + 38 40 static inline struct usb_hcd *musb_to_hcd(struct musb *musb) 39 41 { 40 42 return container_of((void *) musb, struct usb_hcd, hcd_priv); ··· 73 71 u16 maxpacket; 74 72 u16 frame; /* for periodic schedule */ 75 73 unsigned iso_idx; /* in urb->iso_frame_desc[] */ 74 + struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */ 76 75 }; 77 76 78 77 /* map from control or bulk queue head to the first qh on that ring */
+5 -8
drivers/usb/musb/musb_virthub.c
··· 81 81 switch (musb->xceiv->state) { 82 82 case OTG_STATE_A_HOST: 83 83 musb->xceiv->state = OTG_STATE_A_SUSPEND; 84 - musb->is_active = is_otg_enabled(musb) 85 - && otg->host->b_hnp_enable; 84 + musb->is_active = otg->host->b_hnp_enable; 86 85 if (musb->is_active) 87 86 mod_timer(&musb->otg_timer, jiffies 88 87 + msecs_to_jiffies( ··· 90 91 break; 91 92 case OTG_STATE_B_HOST: 92 93 musb->xceiv->state = OTG_STATE_B_WAIT_ACON; 93 - musb->is_active = is_otg_enabled(musb) 94 - && otg->host->b_hnp_enable; 94 + musb->is_active = otg->host->b_hnp_enable; 95 95 musb_platform_try_idle(musb, 0); 96 96 break; 97 97 default: ··· 188 190 189 191 switch (musb->xceiv->state) { 190 192 case OTG_STATE_A_SUSPEND: 191 - if (is_otg_enabled(musb) 192 - && otg->host->b_hnp_enable) { 193 + if (otg->host->b_hnp_enable) { 193 194 musb->xceiv->state = OTG_STATE_A_PERIPHERAL; 194 195 musb->g.is_a_peripheral = 1; 195 196 break; ··· 270 273 musb_port_suspend(musb, false); 271 274 break; 272 275 case USB_PORT_FEAT_POWER: 273 - if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) 276 + if (!hcd->self.is_b_host) 274 277 musb_platform_set_vbus(musb, 0); 275 278 break; 276 279 case USB_PORT_FEAT_C_CONNECTION: ··· 366 369 * initialization logic, e.g. for OTG, or change any 367 370 * logic relating to VBUS power-up. 368 371 */ 369 - if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) 372 + if (!hcd->self.is_b_host) 370 373 musb_start(musb); 371 374 break; 372 375 case USB_PORT_FEAT_RESET:
+1 -1
drivers/usb/musb/musbhsdma.c
··· 380 380 kfree(controller); 381 381 } 382 382 383 - struct dma_controller *__init 383 + struct dma_controller *__devinit 384 384 dma_controller_create(struct musb *musb, void __iomem *base) 385 385 { 386 386 struct musb_dma_controller *controller;
+132 -26
drivers/usb/musb/omap2430.c
··· 30 30 #include <linux/init.h> 31 31 #include <linux/list.h> 32 32 #include <linux/io.h> 33 + #include <linux/of.h> 33 34 #include <linux/platform_device.h> 34 35 #include <linux/dma-mapping.h> 35 36 #include <linux/pm_runtime.h> 36 37 #include <linux/err.h> 38 + #include <linux/delay.h> 37 39 #include <linux/usb/musb-omap.h> 38 40 39 41 #include "musb_core.h" ··· 46 44 struct platform_device *musb; 47 45 enum omap_musb_vbus_id_status status; 48 46 struct work_struct omap_musb_mailbox_work; 47 + u32 __iomem *control_otghs; 49 48 }; 50 49 #define glue_to_musb(g) platform_get_drvdata(g->musb) 51 50 52 51 struct omap2430_glue *_glue; 53 52 54 53 static struct timer_list musb_idle_timer; 54 + 55 + /** 56 + * omap4_usb_phy_mailbox - write to usb otg mailbox 57 + * @glue: struct omap2430_glue * 58 + * @val: the value to be written to the mailbox 59 + * 60 + * On detection of a device (ID pin is grounded), this API should be called 61 + * to set AVALID, VBUSVALID and ID pin is grounded. 62 + * 63 + * When OMAP is connected to a host (OMAP in device mode), this API 64 + * is called to set AVALID, VBUSVALID and ID pin in high impedance. 65 + * 66 + * XXX: This function will be removed once we have a seperate driver for 67 + * control module 68 + */ 69 + static void omap4_usb_phy_mailbox(struct omap2430_glue *glue, u32 val) 70 + { 71 + if (glue->control_otghs) 72 + writel(val, glue->control_otghs); 73 + } 55 74 56 75 static void musb_do_idle(unsigned long _musb) 57 76 { ··· 163 140 struct usb_otg *otg = musb->xceiv->otg; 164 141 u8 devctl; 165 142 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 166 - int ret = 1; 167 143 /* HDRC controls CPEN, but beware current surges during device 168 144 * connect. They can trigger transient overcurrent conditions 169 145 * that must be ignored. 
··· 172 150 173 151 if (is_on) { 174 152 if (musb->xceiv->state == OTG_STATE_A_IDLE) { 153 + int loops = 100; 175 154 /* start the session */ 176 155 devctl |= MUSB_DEVCTL_SESSION; 177 156 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); ··· 182 159 */ 183 160 while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) { 184 161 162 + mdelay(5); 185 163 cpu_relax(); 186 164 187 - if (time_after(jiffies, timeout)) { 165 + if (time_after(jiffies, timeout) 166 + || loops-- <= 0) { 188 167 dev_err(musb->controller, 189 168 "configured as A device timeout"); 190 - ret = -EINVAL; 191 169 break; 192 170 } 193 171 } 194 172 195 - if (ret && otg->set_vbus) 173 + if (otg->set_vbus) 196 174 otg_set_vbus(otg, 1); 197 175 } else { 198 176 musb->is_active = 1; ··· 269 245 270 246 static void omap_musb_set_mailbox(struct omap2430_glue *glue) 271 247 { 248 + u32 val; 272 249 struct musb *musb = glue_to_musb(glue); 273 250 struct device *dev = musb->controller; 274 251 struct musb_hdrc_platform_data *pdata = dev->platform_data; ··· 283 258 otg->default_a = true; 284 259 musb->xceiv->state = OTG_STATE_A_IDLE; 285 260 musb->xceiv->last_event = USB_EVENT_ID; 286 - if (!is_otg_enabled(musb) || musb->gadget_driver) { 261 + if (musb->gadget_driver) { 287 262 pm_runtime_get_sync(dev); 288 - usb_phy_init(musb->xceiv); 263 + val = AVALID | VBUSVALID; 264 + omap4_usb_phy_mailbox(glue, val); 289 265 omap2430_musb_set_vbus(musb, 1); 290 266 } 291 267 break; ··· 299 273 musb->xceiv->last_event = USB_EVENT_VBUS; 300 274 if (musb->gadget_driver) 301 275 pm_runtime_get_sync(dev); 302 - usb_phy_init(musb->xceiv); 276 + val = IDDIG | AVALID | VBUSVALID; 277 + omap4_usb_phy_mailbox(glue, val); 303 278 break; 304 279 305 280 case OMAP_MUSB_ID_FLOAT: ··· 308 281 dev_dbg(dev, "VBUS Disconnect\n"); 309 282 310 283 musb->xceiv->last_event = USB_EVENT_NONE; 311 - if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) 312 - if (musb->gadget_driver) { 313 - pm_runtime_mark_last_busy(dev); 314 - 
pm_runtime_put_autosuspend(dev); 315 - } 284 + if (musb->gadget_driver) { 285 + pm_runtime_mark_last_busy(dev); 286 + pm_runtime_put_autosuspend(dev); 287 + } 316 288 317 289 if (data->interface_type == MUSB_INTERFACE_UTMI) { 318 290 if (musb->xceiv->otg->set_vbus) 319 291 otg_set_vbus(musb->xceiv->otg, 0); 320 292 } 321 - usb_phy_shutdown(musb->xceiv); 293 + val = SESSEND | IDDIG; 294 + omap4_usb_phy_mailbox(glue, val); 322 295 break; 323 296 default: 324 297 dev_dbg(dev, "ID float\n"); ··· 393 366 static void omap2430_musb_enable(struct musb *musb) 394 367 { 395 368 u8 devctl; 369 + u32 val; 396 370 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 397 371 struct device *dev = musb->controller; 398 372 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); ··· 403 375 switch (glue->status) { 404 376 405 377 case OMAP_MUSB_ID_GROUND: 406 - usb_phy_init(musb->xceiv); 378 + val = AVALID | VBUSVALID; 379 + omap4_usb_phy_mailbox(glue, val); 407 380 if (data->interface_type != MUSB_INTERFACE_UTMI) 408 381 break; 409 382 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); ··· 423 394 break; 424 395 425 396 case OMAP_MUSB_VBUS_VALID: 426 - usb_phy_init(musb->xceiv); 397 + val = IDDIG | AVALID | VBUSVALID; 398 + omap4_usb_phy_mailbox(glue, val); 427 399 break; 428 400 429 401 default: ··· 434 404 435 405 static void omap2430_musb_disable(struct musb *musb) 436 406 { 407 + u32 val; 437 408 struct device *dev = musb->controller; 438 409 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 439 410 440 - if (glue->status != OMAP_MUSB_UNKNOWN) 441 - usb_phy_shutdown(musb->xceiv); 411 + if (glue->status != OMAP_MUSB_UNKNOWN) { 412 + val = SESSEND | IDDIG; 413 + omap4_usb_phy_mailbox(glue, val); 414 + } 442 415 } 443 416 444 417 static int omap2430_musb_exit(struct musb *musb) ··· 471 438 static int __devinit omap2430_probe(struct platform_device *pdev) 472 439 { 473 440 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 441 + struct 
omap_musb_board_data *data; 474 442 struct platform_device *musb; 475 443 struct omap2430_glue *glue; 444 + struct device_node *np = pdev->dev.of_node; 445 + struct musb_hdrc_config *config; 446 + struct resource *res; 476 447 int ret = -ENOMEM; 448 + int musbid; 477 449 478 450 glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); 479 451 if (!glue) { ··· 486 448 goto err0; 487 449 } 488 450 489 - musb = platform_device_alloc("musb-hdrc", -1); 490 - if (!musb) { 491 - dev_err(&pdev->dev, "failed to allocate musb device\n"); 451 + /* get the musb id */ 452 + musbid = musb_get_id(&pdev->dev, GFP_KERNEL); 453 + if (musbid < 0) { 454 + dev_err(&pdev->dev, "failed to allocate musb id\n"); 455 + ret = -ENOMEM; 492 456 goto err0; 493 457 } 494 458 459 + musb = platform_device_alloc("musb-hdrc", musbid); 460 + if (!musb) { 461 + dev_err(&pdev->dev, "failed to allocate musb device\n"); 462 + goto err1; 463 + } 464 + 465 + musb->id = musbid; 495 466 musb->dev.parent = &pdev->dev; 496 467 musb->dev.dma_mask = &omap2430_dmamask; 497 468 musb->dev.coherent_dma_mask = omap2430_dmamask; ··· 508 461 glue->dev = &pdev->dev; 509 462 glue->musb = musb; 510 463 glue->status = OMAP_MUSB_UNKNOWN; 464 + 465 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 466 + 467 + glue->control_otghs = devm_request_and_ioremap(&pdev->dev, res); 468 + if (glue->control_otghs == NULL) 469 + dev_dbg(&pdev->dev, "Failed to obtain control memory\n"); 470 + 471 + if (np) { 472 + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 473 + if (!pdata) { 474 + dev_err(&pdev->dev, 475 + "failed to allocate musb platfrom data\n"); 476 + ret = -ENOMEM; 477 + goto err1; 478 + } 479 + 480 + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 481 + if (!data) { 482 + dev_err(&pdev->dev, 483 + "failed to allocate musb board data\n"); 484 + ret = -ENOMEM; 485 + goto err1; 486 + } 487 + 488 + config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); 489 + if (!data) { 490 + 
dev_err(&pdev->dev, 491 + "failed to allocate musb hdrc config\n"); 492 + goto err1; 493 + } 494 + 495 + of_property_read_u32(np, "mode", (u32 *)&pdata->mode); 496 + of_property_read_u32(np, "interface_type", 497 + (u32 *)&data->interface_type); 498 + of_property_read_u32(np, "num_eps", (u32 *)&config->num_eps); 499 + of_property_read_u32(np, "ram_bits", (u32 *)&config->ram_bits); 500 + of_property_read_u32(np, "power", (u32 *)&pdata->power); 501 + config->multipoint = of_property_read_bool(np, "multipoint"); 502 + 503 + pdata->board_data = data; 504 + pdata->config = config; 505 + } 511 506 512 507 pdata->platform_ops = &omap2430_ops; 513 508 ··· 567 478 pdev->num_resources); 568 479 if (ret) { 569 480 dev_err(&pdev->dev, "failed to add resources\n"); 570 - goto err1; 481 + goto err2; 571 482 } 572 483 573 484 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 574 485 if (ret) { 575 486 dev_err(&pdev->dev, "failed to add platform_data\n"); 576 - goto err1; 487 + goto err2; 577 488 } 578 489 579 490 pm_runtime_enable(&pdev->dev); ··· 581 492 ret = platform_device_add(musb); 582 493 if (ret) { 583 494 dev_err(&pdev->dev, "failed to register musb device\n"); 584 - goto err1; 495 + goto err2; 585 496 } 586 497 587 498 return 0; 588 499 589 - err1: 500 + err2: 590 501 platform_device_put(musb); 502 + 503 + err1: 504 + musb_put_id(&pdev->dev, musbid); 591 505 592 506 err0: 593 507 return ret; ··· 601 509 struct omap2430_glue *glue = platform_get_drvdata(pdev); 602 510 603 511 cancel_work_sync(&glue->omap_musb_mailbox_work); 604 - platform_device_del(glue->musb); 605 - platform_device_put(glue->musb); 512 + musb_put_id(&pdev->dev, glue->musb->id); 513 + platform_device_unregister(glue->musb); 606 514 607 515 return 0; 608 516 } ··· 651 559 #define DEV_PM_OPS NULL 652 560 #endif 653 561 562 + #ifdef CONFIG_OF 563 + static const struct of_device_id omap2430_id_table[] = { 564 + { 565 + .compatible = "ti,omap4-musb" 566 + }, 567 + { 568 + .compatible = 
"ti,omap3-musb" 569 + }, 570 + {}, 571 + }; 572 + MODULE_DEVICE_TABLE(of, omap2430_id_table); 573 + #endif 574 + 654 575 static struct platform_driver omap2430_driver = { 655 576 .probe = omap2430_probe, 656 577 .remove = __devexit_p(omap2430_remove), 657 578 .driver = { 658 579 .name = "musb-omap2430", 659 580 .pm = DEV_PM_OPS, 581 + .of_match_table = of_match_ptr(omap2430_id_table), 660 582 }, 661 583 }; 662 584
+9
drivers/usb/musb/omap2430.h
··· 49 49 #define OTG_FORCESTDBY 0x414 50 50 # define ENABLEFORCE (1 << 0) 51 51 52 + /* 53 + * Control Module bit definitions 54 + * XXX: Will be removed once we have a driver for control module. 55 + */ 56 + #define AVALID BIT(0) 57 + #define BVALID BIT(1) 58 + #define VBUSVALID BIT(2) 59 + #define SESSEND BIT(3) 60 + #define IDDIG BIT(4) 52 61 #endif /* __MUSB_OMAP243X_H__ */
+28 -30
drivers/usb/musb/tusb6010.c
··· 154 154 } 155 155 156 156 static inline void tusb_fifo_read_unaligned(void __iomem *fifo, 157 - void __iomem *buf, u16 len) 157 + void *buf, u16 len) 158 158 { 159 159 u32 val; 160 160 int i; ··· 438 438 if (is_host_active(musb) && (musb->port1_status >> 16)) 439 439 goto done; 440 440 441 - if (is_peripheral_enabled(musb) && !musb->gadget_driver) { 441 + if (!musb->gadget_driver) { 442 442 wakeups = 0; 443 443 } else { 444 444 wakeups = TUSB_PRCM_WHOSTDISCON 445 445 | TUSB_PRCM_WBUS 446 446 | TUSB_PRCM_WVBUS; 447 - if (is_otg_enabled(musb)) 448 - wakeups |= TUSB_PRCM_WID; 447 + wakeups |= TUSB_PRCM_WID; 449 448 } 450 449 tusb_allow_idle(musb, wakeups); 451 450 } ··· 582 583 * 583 584 * Note that if a mini-A cable is plugged in the ID line will stay down as 584 585 * the weak ID pull-up is not able to pull the ID up. 585 - * 586 - * REVISIT: It would be possible to add support for changing between host 587 - * and peripheral modes in non-OTG configurations by reconfiguring hardware 588 - * and then setting musb->board_mode. For now, only support OTG mode. 589 586 */ 590 587 static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode) 591 588 { 592 589 void __iomem *tbase = musb->ctrl_base; 593 590 u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; 594 - 595 - if (musb->board_mode != MUSB_OTG) { 596 - ERR("Changing mode currently only supported in OTG mode\n"); 597 - return -EINVAL; 598 - } 599 591 600 592 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 601 593 phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); ··· 643 653 if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) { 644 654 int default_a; 645 655 646 - if (is_otg_enabled(musb)) 647 - default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); 648 - else 649 - default_a = is_host_enabled(musb); 656 + default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); 650 657 dev_dbg(musb->controller, "Default-%c\n", default_a ? 
'A' : 'B'); 651 658 otg->default_a = default_a; 652 659 tusb_musb_set_vbus(musb, default_a); ··· 657 670 if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { 658 671 659 672 /* B-dev state machine: no vbus ~= disconnect */ 660 - if ((is_otg_enabled(musb) && !otg->default_a) 661 - || !is_host_enabled(musb)) { 673 + if (!otg->default_a) { 662 674 /* ? musb_root_disconnect(musb); */ 663 675 musb->port1_status &= 664 676 ~(USB_PORT_STAT_CONNECTION ··· 1106 1120 } 1107 1121 musb->isr = tusb_musb_interrupt; 1108 1122 1109 - if (is_peripheral_enabled(musb)) { 1110 - musb->xceiv->set_power = tusb_draw_power; 1111 - the_musb = musb; 1112 - } 1123 + musb->xceiv->set_power = tusb_draw_power; 1124 + the_musb = musb; 1113 1125 1114 1126 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 1115 1127 ··· 1160 1176 struct tusb6010_glue *glue; 1161 1177 1162 1178 int ret = -ENOMEM; 1179 + int musbid; 1163 1180 1164 1181 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 1165 1182 if (!glue) { ··· 1168 1183 goto err0; 1169 1184 } 1170 1185 1171 - musb = platform_device_alloc("musb-hdrc", -1); 1172 - if (!musb) { 1173 - dev_err(&pdev->dev, "failed to allocate musb device\n"); 1186 + /* get the musb id */ 1187 + musbid = musb_get_id(&pdev->dev, GFP_KERNEL); 1188 + if (musbid < 0) { 1189 + dev_err(&pdev->dev, "failed to allocate musb id\n"); 1190 + ret = -ENOMEM; 1174 1191 goto err1; 1175 1192 } 1176 1193 1194 + musb = platform_device_alloc("musb-hdrc", musbid); 1195 + if (!musb) { 1196 + dev_err(&pdev->dev, "failed to allocate musb device\n"); 1197 + goto err2; 1198 + } 1199 + 1200 + musb->id = musbid; 1177 1201 musb->dev.parent = &pdev->dev; 1178 1202 musb->dev.dma_mask = &tusb_dmamask; 1179 1203 musb->dev.coherent_dma_mask = tusb_dmamask; ··· 1198 1204 pdev->num_resources); 1199 1205 if (ret) { 1200 1206 dev_err(&pdev->dev, "failed to add resources\n"); 1201 - goto err2; 1207 + goto err3; 1202 1208 } 1203 1209 1204 1210 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 
1205 1211 if (ret) { 1206 1212 dev_err(&pdev->dev, "failed to add platform_data\n"); 1207 - goto err2; 1213 + goto err3; 1208 1214 } 1209 1215 1210 1216 ret = platform_device_add(musb); 1211 1217 if (ret) { 1212 1218 dev_err(&pdev->dev, "failed to register musb device\n"); 1213 - goto err1; 1219 + goto err3; 1214 1220 } 1215 1221 1216 1222 return 0; 1217 1223 1218 - err2: 1224 + err3: 1219 1225 platform_device_put(musb); 1226 + 1227 + err2: 1228 + musb_put_id(&pdev->dev, musbid); 1220 1229 1221 1230 err1: 1222 1231 kfree(glue); ··· 1232 1235 { 1233 1236 struct tusb6010_glue *glue = platform_get_drvdata(pdev); 1234 1237 1238 + musb_put_id(&pdev->dev, glue->musb->id); 1235 1239 platform_device_del(glue->musb); 1236 1240 platform_device_put(glue->musb); 1237 1241 kfree(glue);
+1 -1
drivers/usb/musb/tusb6010_omap.c
··· 662 662 kfree(tusb_dma); 663 663 } 664 664 665 - struct dma_controller *__init 665 + struct dma_controller *__devinit 666 666 dma_controller_create(struct musb *musb, void __iomem *base) 667 667 { 668 668 void __iomem *tbase = musb->ctrl_base;
+23 -10
drivers/usb/musb/ux500.c
··· 74 74 goto err0; 75 75 } 76 76 77 - musb = platform_device_alloc("musb-hdrc", -1); 77 + /* get the musb id */ 78 + musbid = musb_get_id(&pdev->dev, GFP_KERNEL); 79 + if (musbid < 0) { 80 + dev_err(&pdev->dev, "failed to allocate musb id\n"); 81 + ret = -ENOMEM; 82 + goto err1; 83 + } 84 + 85 + musb = platform_device_alloc("musb-hdrc", musbid); 78 86 if (!musb) { 79 87 dev_err(&pdev->dev, "failed to allocate musb device\n"); 80 - goto err1; 88 + goto err2; 81 89 } 82 90 83 91 clk = clk_get(&pdev->dev, "usb"); 84 92 if (IS_ERR(clk)) { 85 93 dev_err(&pdev->dev, "failed to get clock\n"); 86 94 ret = PTR_ERR(clk); 87 - goto err2; 95 + goto err3; 88 96 } 89 97 90 98 ret = clk_enable(clk); 91 99 if (ret) { 92 100 dev_err(&pdev->dev, "failed to enable clock\n"); 93 - goto err3; 101 + goto err4; 94 102 } 95 103 104 + musb->id = musbid; 96 105 musb->dev.parent = &pdev->dev; 97 106 musb->dev.dma_mask = pdev->dev.dma_mask; 98 107 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; ··· 118 109 pdev->num_resources); 119 110 if (ret) { 120 111 dev_err(&pdev->dev, "failed to add resources\n"); 121 - goto err4; 112 + goto err5; 122 113 } 123 114 124 115 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 125 116 if (ret) { 126 117 dev_err(&pdev->dev, "failed to add platform_data\n"); 127 - goto err4; 118 + goto err5; 128 119 } 129 120 130 121 ret = platform_device_add(musb); 131 122 if (ret) { 132 123 dev_err(&pdev->dev, "failed to register musb device\n"); 133 - goto err4; 124 + goto err5; 134 125 } 135 126 136 127 return 0; 137 128 138 - err4: 129 + err5: 139 130 clk_disable(clk); 140 131 141 - err3: 132 + err4: 142 133 clk_put(clk); 143 134 144 - err2: 135 + err3: 145 136 platform_device_put(musb); 137 + 138 + err2: 139 + musb_put_id(&pdev->dev, musbid); 146 140 147 141 err1: 148 142 kfree(glue); ··· 158 146 { 159 147 struct ux500_glue *glue = platform_get_drvdata(pdev); 160 148 149 + musb_put_id(&pdev->dev, glue->musb->id); 161 150 
platform_device_del(glue->musb); 162 151 platform_device_put(glue->musb); 163 152 clk_disable(glue->clk);
+1 -1
drivers/usb/musb/ux500_dma.c
··· 364 364 kfree(controller); 365 365 } 366 366 367 - struct dma_controller *__init 367 + struct dma_controller *__devinit 368 368 dma_controller_create(struct musb *musb, void __iomem *base) 369 369 { 370 370 struct ux500_dma_controller *controller;