Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.4-rc3 3422 lines 86 kB view raw
1/* 2 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller 3 * 4 * Copyright (C) 2005-2007 AMD (http://www.amd.com) 5 * Author: Thomas Dahlmann 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 */ 12 13/* 14 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536. 15 * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it 16 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type). 17 * 18 * Make sure that UDC is assigned to port 4 by BIOS settings (port can also 19 * be used as host port) and UOC bits PAD_EN and APU are set (should be done 20 * by BIOS init). 21 * 22 * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not 23 * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0") 24 * can be used with gadget ether. 
25 */ 26 27/* debug control */ 28/* #define UDC_VERBOSE */ 29 30/* Driver strings */ 31#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller" 32#define UDC_DRIVER_VERSION_STRING "01.00.0206" 33 34/* system */ 35#include <linux/module.h> 36#include <linux/pci.h> 37#include <linux/kernel.h> 38#include <linux/delay.h> 39#include <linux/ioport.h> 40#include <linux/sched.h> 41#include <linux/slab.h> 42#include <linux/errno.h> 43#include <linux/init.h> 44#include <linux/timer.h> 45#include <linux/list.h> 46#include <linux/interrupt.h> 47#include <linux/ioctl.h> 48#include <linux/fs.h> 49#include <linux/dmapool.h> 50#include <linux/moduleparam.h> 51#include <linux/device.h> 52#include <linux/io.h> 53#include <linux/irq.h> 54#include <linux/prefetch.h> 55 56#include <asm/byteorder.h> 57#include <asm/unaligned.h> 58 59/* gadget stack */ 60#include <linux/usb/ch9.h> 61#include <linux/usb/gadget.h> 62 63/* udc specific */ 64#include "amd5536udc.h" 65 66 67static void udc_tasklet_disconnect(unsigned long); 68static void empty_req_queue(struct udc_ep *); 69static int udc_probe(struct udc *dev); 70static void udc_basic_init(struct udc *dev); 71static void udc_setup_endpoints(struct udc *dev); 72static void udc_soft_reset(struct udc *dev); 73static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep); 74static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq); 75static int udc_free_dma_chain(struct udc *dev, struct udc_request *req); 76static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req, 77 unsigned long buf_len, gfp_t gfp_flags); 78static int udc_remote_wakeup(struct udc *dev); 79static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); 80static void udc_pci_remove(struct pci_dev *pdev); 81 82/* description */ 83static const char mod_desc[] = UDC_MOD_DESCRIPTION; 84static const char name[] = "amd5536udc"; 85 86/* structure to hold endpoint function pointers */ 87static const struct usb_ep_ops 
udc_ep_ops; 88 89/* received setup data */ 90static union udc_setup_data setup_data; 91 92/* pointer to device object */ 93static struct udc *udc; 94 95/* irq spin lock for soft reset */ 96static DEFINE_SPINLOCK(udc_irq_spinlock); 97/* stall spin lock */ 98static DEFINE_SPINLOCK(udc_stall_spinlock); 99 100/* 101* slave mode: pending bytes in rx fifo after nyet, 102* used if EPIN irq came but no req was available 103*/ 104static unsigned int udc_rxfifo_pending; 105 106/* count soft resets after suspend to avoid loop */ 107static int soft_reset_occured; 108static int soft_reset_after_usbreset_occured; 109 110/* timer */ 111static struct timer_list udc_timer; 112static int stop_timer; 113 114/* set_rde -- Is used to control enabling of RX DMA. Problem is 115 * that UDC has only one bit (RDE) to enable/disable RX DMA for 116 * all OUT endpoints. So we have to handle race conditions like 117 * when OUT data reaches the fifo but no request was queued yet. 118 * This cannot be solved by letting the RX DMA disabled until a 119 * request gets queued because there may be other OUT packets 120 * in the FIFO (important for not blocking control traffic). 121 * The value of set_rde controls the correspondig timer. 
122 * 123 * set_rde -1 == not used, means it is alloed to be set to 0 or 1 124 * set_rde 0 == do not touch RDE, do no start the RDE timer 125 * set_rde 1 == timer function will look whether FIFO has data 126 * set_rde 2 == set by timer function to enable RX DMA on next call 127 */ 128static int set_rde = -1; 129 130static DECLARE_COMPLETION(on_exit); 131static struct timer_list udc_pollstall_timer; 132static int stop_pollstall_timer; 133static DECLARE_COMPLETION(on_pollstall_exit); 134 135/* tasklet for usb disconnect */ 136static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect, 137 (unsigned long) &udc); 138 139 140/* endpoint names used for print */ 141static const char ep0_string[] = "ep0in"; 142static const char *const ep_string[] = { 143 ep0_string, 144 "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk", 145 "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk", 146 "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk", 147 "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk", 148 "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk", 149 "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk", 150 "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk" 151}; 152 153/* DMA usage flag */ 154static bool use_dma = 1; 155/* packet per buffer dma */ 156static bool use_dma_ppb = 1; 157/* with per descr. 
update */ 158static bool use_dma_ppb_du; 159/* buffer fill mode */ 160static int use_dma_bufferfill_mode; 161/* full speed only mode */ 162static bool use_fullspeed; 163/* tx buffer size for high speed */ 164static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE; 165 166/* module parameters */ 167module_param(use_dma, bool, S_IRUGO); 168MODULE_PARM_DESC(use_dma, "true for DMA"); 169module_param(use_dma_ppb, bool, S_IRUGO); 170MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode"); 171module_param(use_dma_ppb_du, bool, S_IRUGO); 172MODULE_PARM_DESC(use_dma_ppb_du, 173 "true for DMA in packet per buffer mode with descriptor update"); 174module_param(use_fullspeed, bool, S_IRUGO); 175MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only"); 176 177/*---------------------------------------------------------------------------*/ 178/* Prints UDC device registers and endpoint irq registers */ 179static void print_regs(struct udc *dev) 180{ 181 DBG(dev, "------- Device registers -------\n"); 182 DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg)); 183 DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl)); 184 DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts)); 185 DBG(dev, "\n"); 186 DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts)); 187 DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk)); 188 DBG(dev, "\n"); 189 DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts)); 190 DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk)); 191 DBG(dev, "\n"); 192 DBG(dev, "USE DMA = %d\n", use_dma); 193 if (use_dma && use_dma_ppb && !use_dma_ppb_du) { 194 DBG(dev, "DMA mode = PPBNDU (packet per buffer " 195 "WITHOUT desc. update)\n"); 196 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU"); 197 } else if (use_dma && use_dma_ppb && use_dma_ppb_du) { 198 DBG(dev, "DMA mode = PPBDU (packet per buffer " 199 "WITH desc. 
update)\n"); 200 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU"); 201 } 202 if (use_dma && use_dma_bufferfill_mode) { 203 DBG(dev, "DMA mode = BF (buffer fill mode)\n"); 204 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF"); 205 } 206 if (!use_dma) 207 dev_info(&dev->pdev->dev, "FIFO mode\n"); 208 DBG(dev, "-------------------------------------------------------\n"); 209} 210 211/* Masks unused interrupts */ 212static int udc_mask_unused_interrupts(struct udc *dev) 213{ 214 u32 tmp; 215 216 /* mask all dev interrupts */ 217 tmp = AMD_BIT(UDC_DEVINT_SVC) | 218 AMD_BIT(UDC_DEVINT_ENUM) | 219 AMD_BIT(UDC_DEVINT_US) | 220 AMD_BIT(UDC_DEVINT_UR) | 221 AMD_BIT(UDC_DEVINT_ES) | 222 AMD_BIT(UDC_DEVINT_SI) | 223 AMD_BIT(UDC_DEVINT_SOF)| 224 AMD_BIT(UDC_DEVINT_SC); 225 writel(tmp, &dev->regs->irqmsk); 226 227 /* mask all ep interrupts */ 228 writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk); 229 230 return 0; 231} 232 233/* Enables endpoint 0 interrupts */ 234static int udc_enable_ep0_interrupts(struct udc *dev) 235{ 236 u32 tmp; 237 238 DBG(dev, "udc_enable_ep0_interrupts()\n"); 239 240 /* read irq mask */ 241 tmp = readl(&dev->regs->ep_irqmsk); 242 /* enable ep0 irq's */ 243 tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0) 244 & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0); 245 writel(tmp, &dev->regs->ep_irqmsk); 246 247 return 0; 248} 249 250/* Enables device interrupts for SET_INTF and SET_CONFIG */ 251static int udc_enable_dev_setup_interrupts(struct udc *dev) 252{ 253 u32 tmp; 254 255 DBG(dev, "enable device interrupts for setup data\n"); 256 257 /* read irq mask */ 258 tmp = readl(&dev->regs->irqmsk); 259 260 /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */ 261 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI) 262 & AMD_UNMASK_BIT(UDC_DEVINT_SC) 263 & AMD_UNMASK_BIT(UDC_DEVINT_UR) 264 & AMD_UNMASK_BIT(UDC_DEVINT_SVC) 265 & AMD_UNMASK_BIT(UDC_DEVINT_ENUM); 266 writel(tmp, &dev->regs->irqmsk); 267 268 return 0; 269} 270 271/* Calculates fifo start of endpoint based on 
preceding endpoints */ 272static int udc_set_txfifo_addr(struct udc_ep *ep) 273{ 274 struct udc *dev; 275 u32 tmp; 276 int i; 277 278 if (!ep || !(ep->in)) 279 return -EINVAL; 280 281 dev = ep->dev; 282 ep->txfifo = dev->txfifo; 283 284 /* traverse ep's */ 285 for (i = 0; i < ep->num; i++) { 286 if (dev->ep[i].regs) { 287 /* read fifo size */ 288 tmp = readl(&dev->ep[i].regs->bufin_framenum); 289 tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE); 290 ep->txfifo += tmp; 291 } 292 } 293 return 0; 294} 295 296/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */ 297static u32 cnak_pending; 298 299static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num) 300{ 301 if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) { 302 DBG(ep->dev, "NAK could not be cleared for ep%d\n", num); 303 cnak_pending |= 1 << (num); 304 ep->naking = 1; 305 } else 306 cnak_pending = cnak_pending & (~(1 << (num))); 307} 308 309 310/* Enables endpoint, is called by gadget driver */ 311static int 312udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc) 313{ 314 struct udc_ep *ep; 315 struct udc *dev; 316 u32 tmp; 317 unsigned long iflags; 318 u8 udc_csr_epix; 319 unsigned maxpacket; 320 321 if (!usbep 322 || usbep->name == ep0_string 323 || !desc 324 || desc->bDescriptorType != USB_DT_ENDPOINT) 325 return -EINVAL; 326 327 ep = container_of(usbep, struct udc_ep, ep); 328 dev = ep->dev; 329 330 DBG(dev, "udc_ep_enable() ep %d\n", ep->num); 331 332 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 333 return -ESHUTDOWN; 334 335 spin_lock_irqsave(&dev->lock, iflags); 336 ep->desc = desc; 337 338 ep->halted = 0; 339 340 /* set traffic type */ 341 tmp = readl(&dev->ep[ep->num].regs->ctl); 342 tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET); 343 writel(tmp, &dev->ep[ep->num].regs->ctl); 344 345 /* set max packet size */ 346 maxpacket = usb_endpoint_maxp(desc); 347 tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt); 348 tmp = AMD_ADDBITS(tmp, maxpacket, 
UDC_EP_MAX_PKT_SIZE); 349 ep->ep.maxpacket = maxpacket; 350 writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt); 351 352 /* IN ep */ 353 if (ep->in) { 354 355 /* ep ix in UDC CSR register space */ 356 udc_csr_epix = ep->num; 357 358 /* set buffer size (tx fifo entries) */ 359 tmp = readl(&dev->ep[ep->num].regs->bufin_framenum); 360 /* double buffering: fifo size = 2 x max packet size */ 361 tmp = AMD_ADDBITS( 362 tmp, 363 maxpacket * UDC_EPIN_BUFF_SIZE_MULT 364 / UDC_DWORD_BYTES, 365 UDC_EPIN_BUFF_SIZE); 366 writel(tmp, &dev->ep[ep->num].regs->bufin_framenum); 367 368 /* calc. tx fifo base addr */ 369 udc_set_txfifo_addr(ep); 370 371 /* flush fifo */ 372 tmp = readl(&ep->regs->ctl); 373 tmp |= AMD_BIT(UDC_EPCTL_F); 374 writel(tmp, &ep->regs->ctl); 375 376 /* OUT ep */ 377 } else { 378 /* ep ix in UDC CSR register space */ 379 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; 380 381 /* set max packet size UDC CSR */ 382 tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); 383 tmp = AMD_ADDBITS(tmp, maxpacket, 384 UDC_CSR_NE_MAX_PKT); 385 writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); 386 387 if (use_dma && !ep->in) { 388 /* alloc and init BNA dummy request */ 389 ep->bna_dummy_req = udc_alloc_bna_dummy(ep); 390 ep->bna_occurred = 0; 391 } 392 393 if (ep->num != UDC_EP0OUT_IX) 394 dev->data_ep_enabled = 1; 395 } 396 397 /* set ep values */ 398 tmp = readl(&dev->csr->ne[udc_csr_epix]); 399 /* max packet */ 400 tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT); 401 /* ep number */ 402 tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM); 403 /* ep direction */ 404 tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR); 405 /* ep type */ 406 tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE); 407 /* ep config */ 408 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG); 409 /* ep interface */ 410 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF); 411 /* ep alt */ 412 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, 
UDC_CSR_NE_ALT); 413 /* write reg */ 414 writel(tmp, &dev->csr->ne[udc_csr_epix]); 415 416 /* enable ep irq */ 417 tmp = readl(&dev->regs->ep_irqmsk); 418 tmp &= AMD_UNMASK_BIT(ep->num); 419 writel(tmp, &dev->regs->ep_irqmsk); 420 421 /* 422 * clear NAK by writing CNAK 423 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written 424 */ 425 if (!use_dma || ep->in) { 426 tmp = readl(&ep->regs->ctl); 427 tmp |= AMD_BIT(UDC_EPCTL_CNAK); 428 writel(tmp, &ep->regs->ctl); 429 ep->naking = 0; 430 UDC_QUEUE_CNAK(ep, ep->num); 431 } 432 tmp = desc->bEndpointAddress; 433 DBG(dev, "%s enabled\n", usbep->name); 434 435 spin_unlock_irqrestore(&dev->lock, iflags); 436 return 0; 437} 438 439/* Resets endpoint */ 440static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep) 441{ 442 u32 tmp; 443 444 VDBG(ep->dev, "ep-%d reset\n", ep->num); 445 ep->desc = NULL; 446 ep->ep.desc = NULL; 447 ep->ep.ops = &udc_ep_ops; 448 INIT_LIST_HEAD(&ep->queue); 449 450 ep->ep.maxpacket = (u16) ~0; 451 /* set NAK */ 452 tmp = readl(&ep->regs->ctl); 453 tmp |= AMD_BIT(UDC_EPCTL_SNAK); 454 writel(tmp, &ep->regs->ctl); 455 ep->naking = 1; 456 457 /* disable interrupt */ 458 tmp = readl(&regs->ep_irqmsk); 459 tmp |= AMD_BIT(ep->num); 460 writel(tmp, &regs->ep_irqmsk); 461 462 if (ep->in) { 463 /* unset P and IN bit of potential former DMA */ 464 tmp = readl(&ep->regs->ctl); 465 tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P); 466 writel(tmp, &ep->regs->ctl); 467 468 tmp = readl(&ep->regs->sts); 469 tmp |= AMD_BIT(UDC_EPSTS_IN); 470 writel(tmp, &ep->regs->sts); 471 472 /* flush the fifo */ 473 tmp = readl(&ep->regs->ctl); 474 tmp |= AMD_BIT(UDC_EPCTL_F); 475 writel(tmp, &ep->regs->ctl); 476 477 } 478 /* reset desc pointer */ 479 writel(0, &ep->regs->desptr); 480} 481 482/* Disables endpoint, is called by gadget driver */ 483static int udc_ep_disable(struct usb_ep *usbep) 484{ 485 struct udc_ep *ep = NULL; 486 unsigned long iflags; 487 488 if (!usbep) 489 return -EINVAL; 490 491 ep = 
container_of(usbep, struct udc_ep, ep); 492 if (usbep->name == ep0_string || !ep->desc) 493 return -EINVAL; 494 495 DBG(ep->dev, "Disable ep-%d\n", ep->num); 496 497 spin_lock_irqsave(&ep->dev->lock, iflags); 498 udc_free_request(&ep->ep, &ep->bna_dummy_req->req); 499 empty_req_queue(ep); 500 ep_init(ep->dev->regs, ep); 501 spin_unlock_irqrestore(&ep->dev->lock, iflags); 502 503 return 0; 504} 505 506/* Allocates request packet, called by gadget driver */ 507static struct usb_request * 508udc_alloc_request(struct usb_ep *usbep, gfp_t gfp) 509{ 510 struct udc_request *req; 511 struct udc_data_dma *dma_desc; 512 struct udc_ep *ep; 513 514 if (!usbep) 515 return NULL; 516 517 ep = container_of(usbep, struct udc_ep, ep); 518 519 VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num); 520 req = kzalloc(sizeof(struct udc_request), gfp); 521 if (!req) 522 return NULL; 523 524 req->req.dma = DMA_DONT_USE; 525 INIT_LIST_HEAD(&req->queue); 526 527 if (ep->dma) { 528 /* ep0 in requests are allocated from data pool here */ 529 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp, 530 &req->td_phys); 531 if (!dma_desc) { 532 kfree(req); 533 return NULL; 534 } 535 536 VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, " 537 "td_phys = %lx\n", 538 req, dma_desc, 539 (unsigned long)req->td_phys); 540 /* prevent from using desc. 
- set HOST BUSY */ 541 dma_desc->status = AMD_ADDBITS(dma_desc->status, 542 UDC_DMA_STP_STS_BS_HOST_BUSY, 543 UDC_DMA_STP_STS_BS); 544 dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE); 545 req->td_data = dma_desc; 546 req->td_data_last = NULL; 547 req->chain_len = 1; 548 } 549 550 return &req->req; 551} 552 553/* Frees request packet, called by gadget driver */ 554static void 555udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq) 556{ 557 struct udc_ep *ep; 558 struct udc_request *req; 559 560 if (!usbep || !usbreq) 561 return; 562 563 ep = container_of(usbep, struct udc_ep, ep); 564 req = container_of(usbreq, struct udc_request, req); 565 VDBG(ep->dev, "free_req req=%p\n", req); 566 BUG_ON(!list_empty(&req->queue)); 567 if (req->td_data) { 568 VDBG(ep->dev, "req->td_data=%p\n", req->td_data); 569 570 /* free dma chain if created */ 571 if (req->chain_len > 1) 572 udc_free_dma_chain(ep->dev, req); 573 574 pci_pool_free(ep->dev->data_requests, req->td_data, 575 req->td_phys); 576 } 577 kfree(req); 578} 579 580/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */ 581static void udc_init_bna_dummy(struct udc_request *req) 582{ 583 if (req) { 584 /* set last bit */ 585 req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); 586 /* set next pointer to itself */ 587 req->td_data->next = req->td_phys; 588 /* set HOST BUSY */ 589 req->td_data->status 590 = AMD_ADDBITS(req->td_data->status, 591 UDC_DMA_STP_STS_BS_DMA_DONE, 592 UDC_DMA_STP_STS_BS); 593#ifdef UDC_VERBOSE 594 pr_debug("bna desc = %p, sts = %08x\n", 595 req->td_data, req->td_data->status); 596#endif 597 } 598} 599 600/* Allocate BNA dummy descriptor */ 601static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep) 602{ 603 struct udc_request *req = NULL; 604 struct usb_request *_req = NULL; 605 606 /* alloc the dummy request */ 607 _req = udc_alloc_request(&ep->ep, GFP_ATOMIC); 608 if (_req) { 609 req = container_of(_req, struct udc_request, req); 610 ep->bna_dummy_req = req; 611 
udc_init_bna_dummy(req); 612 } 613 return req; 614} 615 616/* Write data to TX fifo for IN packets */ 617static void 618udc_txfifo_write(struct udc_ep *ep, struct usb_request *req) 619{ 620 u8 *req_buf; 621 u32 *buf; 622 int i, j; 623 unsigned bytes = 0; 624 unsigned remaining = 0; 625 626 if (!req || !ep) 627 return; 628 629 req_buf = req->buf + req->actual; 630 prefetch(req_buf); 631 remaining = req->length - req->actual; 632 633 buf = (u32 *) req_buf; 634 635 bytes = ep->ep.maxpacket; 636 if (bytes > remaining) 637 bytes = remaining; 638 639 /* dwords first */ 640 for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) 641 writel(*(buf + i), ep->txfifo); 642 643 /* remaining bytes must be written by byte access */ 644 for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) { 645 writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)), 646 ep->txfifo); 647 } 648 649 /* dummy write confirm */ 650 writel(0, &ep->regs->confirm); 651} 652 653/* Read dwords from RX fifo for OUT transfers */ 654static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords) 655{ 656 int i; 657 658 VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords); 659 660 for (i = 0; i < dwords; i++) 661 *(buf + i) = readl(dev->rxfifo); 662 return 0; 663} 664 665/* Read bytes from RX fifo for OUT transfers */ 666static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes) 667{ 668 int i, j; 669 u32 tmp; 670 671 VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes); 672 673 /* dwords first */ 674 for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) 675 *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo); 676 677 /* remaining bytes must be read by byte access */ 678 if (bytes % UDC_DWORD_BYTES) { 679 tmp = readl(dev->rxfifo); 680 for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) { 681 *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK); 682 tmp = tmp >> UDC_BITS_PER_BYTE; 683 } 684 } 685 686 return 0; 687} 688 689/* Read data from RX fifo for OUT transfers */ 690static int 691udc_rxfifo_read(struct udc_ep *ep, struct 
udc_request *req) 692{ 693 u8 *buf; 694 unsigned buf_space; 695 unsigned bytes = 0; 696 unsigned finished = 0; 697 698 /* received number bytes */ 699 bytes = readl(&ep->regs->sts); 700 bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE); 701 702 buf_space = req->req.length - req->req.actual; 703 buf = req->req.buf + req->req.actual; 704 if (bytes > buf_space) { 705 if ((buf_space % ep->ep.maxpacket) != 0) { 706 DBG(ep->dev, 707 "%s: rx %d bytes, rx-buf space = %d bytesn\n", 708 ep->ep.name, bytes, buf_space); 709 req->req.status = -EOVERFLOW; 710 } 711 bytes = buf_space; 712 } 713 req->req.actual += bytes; 714 715 /* last packet ? */ 716 if (((bytes % ep->ep.maxpacket) != 0) || (!bytes) 717 || ((req->req.actual == req->req.length) && !req->req.zero)) 718 finished = 1; 719 720 /* read rx fifo bytes */ 721 VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes); 722 udc_rxfifo_read_bytes(ep->dev, buf, bytes); 723 724 return finished; 725} 726 727/* create/re-init a DMA descriptor or a DMA descriptor chain */ 728static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp) 729{ 730 int retval = 0; 731 u32 tmp; 732 733 VDBG(ep->dev, "prep_dma\n"); 734 VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n", 735 ep->num, req->td_data); 736 737 /* set buffer pointer */ 738 req->td_data->bufptr = req->req.dma; 739 740 /* set last bit */ 741 req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); 742 743 /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */ 744 if (use_dma_ppb) { 745 746 retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); 747 if (retval != 0) { 748 if (retval == -ENOMEM) 749 DBG(ep->dev, "Out of DMA memory\n"); 750 return retval; 751 } 752 if (ep->in) { 753 if (req->req.length == ep->ep.maxpacket) { 754 /* write tx bytes */ 755 req->td_data->status = 756 AMD_ADDBITS(req->td_data->status, 757 ep->ep.maxpacket, 758 UDC_DMA_IN_STS_TXBYTES); 759 760 } 761 } 762 763 } 764 765 if (ep->in) { 766 VDBG(ep->dev, "IN: use_dma_ppb=%d 
req->req.len=%d " 767 "maxpacket=%d ep%d\n", 768 use_dma_ppb, req->req.length, 769 ep->ep.maxpacket, ep->num); 770 /* 771 * if bytes < max packet then tx bytes must 772 * be written in packet per buffer mode 773 */ 774 if (!use_dma_ppb || req->req.length < ep->ep.maxpacket 775 || ep->num == UDC_EP0OUT_IX 776 || ep->num == UDC_EP0IN_IX) { 777 /* write tx bytes */ 778 req->td_data->status = 779 AMD_ADDBITS(req->td_data->status, 780 req->req.length, 781 UDC_DMA_IN_STS_TXBYTES); 782 /* reset frame num */ 783 req->td_data->status = 784 AMD_ADDBITS(req->td_data->status, 785 0, 786 UDC_DMA_IN_STS_FRAMENUM); 787 } 788 /* set HOST BUSY */ 789 req->td_data->status = 790 AMD_ADDBITS(req->td_data->status, 791 UDC_DMA_STP_STS_BS_HOST_BUSY, 792 UDC_DMA_STP_STS_BS); 793 } else { 794 VDBG(ep->dev, "OUT set host ready\n"); 795 /* set HOST READY */ 796 req->td_data->status = 797 AMD_ADDBITS(req->td_data->status, 798 UDC_DMA_STP_STS_BS_HOST_READY, 799 UDC_DMA_STP_STS_BS); 800 801 802 /* clear NAK by writing CNAK */ 803 if (ep->naking) { 804 tmp = readl(&ep->regs->ctl); 805 tmp |= AMD_BIT(UDC_EPCTL_CNAK); 806 writel(tmp, &ep->regs->ctl); 807 ep->naking = 0; 808 UDC_QUEUE_CNAK(ep, ep->num); 809 } 810 811 } 812 813 return retval; 814} 815 816/* Completes request packet ... 
caller MUST hold lock */ 817static void 818complete_req(struct udc_ep *ep, struct udc_request *req, int sts) 819__releases(ep->dev->lock) 820__acquires(ep->dev->lock) 821{ 822 struct udc *dev; 823 unsigned halted; 824 825 VDBG(ep->dev, "complete_req(): ep%d\n", ep->num); 826 827 dev = ep->dev; 828 /* unmap DMA */ 829 if (ep->dma) 830 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in); 831 832 halted = ep->halted; 833 ep->halted = 1; 834 835 /* set new status if pending */ 836 if (req->req.status == -EINPROGRESS) 837 req->req.status = sts; 838 839 /* remove from ep queue */ 840 list_del_init(&req->queue); 841 842 VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n", 843 &req->req, req->req.length, ep->ep.name, sts); 844 845 spin_unlock(&dev->lock); 846 req->req.complete(&ep->ep, &req->req); 847 spin_lock(&dev->lock); 848 ep->halted = halted; 849} 850 851/* frees pci pool descriptors of a DMA chain */ 852static int udc_free_dma_chain(struct udc *dev, struct udc_request *req) 853{ 854 855 int ret_val = 0; 856 struct udc_data_dma *td; 857 struct udc_data_dma *td_last = NULL; 858 unsigned int i; 859 860 DBG(dev, "free chain req = %p\n", req); 861 862 /* do not free first desc., will be done by free for request */ 863 td_last = req->td_data; 864 td = phys_to_virt(td_last->next); 865 866 for (i = 1; i < req->chain_len; i++) { 867 868 pci_pool_free(dev->data_requests, td, 869 (dma_addr_t) td_last->next); 870 td_last = td; 871 td = phys_to_virt(td_last->next); 872 } 873 874 return ret_val; 875} 876 877/* Iterates to the end of a DMA chain and returns last descriptor */ 878static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req) 879{ 880 struct udc_data_dma *td; 881 882 td = req->td_data; 883 while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) 884 td = phys_to_virt(td->next); 885 886 return td; 887 888} 889 890/* Iterates to the end of a DMA chain and counts bytes received */ 891static u32 udc_get_ppbdu_rxbytes(struct udc_request *req) 
892{ 893 struct udc_data_dma *td; 894 u32 count; 895 896 td = req->td_data; 897 /* received number bytes */ 898 count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES); 899 900 while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) { 901 td = phys_to_virt(td->next); 902 /* received number bytes */ 903 if (td) { 904 count += AMD_GETBITS(td->status, 905 UDC_DMA_OUT_STS_RXBYTES); 906 } 907 } 908 909 return count; 910 911} 912 913/* Creates or re-inits a DMA chain */ 914static int udc_create_dma_chain( 915 struct udc_ep *ep, 916 struct udc_request *req, 917 unsigned long buf_len, gfp_t gfp_flags 918) 919{ 920 unsigned long bytes = req->req.length; 921 unsigned int i; 922 dma_addr_t dma_addr; 923 struct udc_data_dma *td = NULL; 924 struct udc_data_dma *last = NULL; 925 unsigned long txbytes; 926 unsigned create_new_chain = 0; 927 unsigned len; 928 929 VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n", 930 bytes, buf_len); 931 dma_addr = DMA_DONT_USE; 932 933 /* unset L bit in first desc for OUT */ 934 if (!ep->in) 935 req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L); 936 937 /* alloc only new desc's if not already available */ 938 len = req->req.length / ep->ep.maxpacket; 939 if (req->req.length % ep->ep.maxpacket) 940 len++; 941 942 if (len > req->chain_len) { 943 /* shorter chain already allocated before */ 944 if (req->chain_len > 1) 945 udc_free_dma_chain(ep->dev, req); 946 req->chain_len = len; 947 create_new_chain = 1; 948 } 949 950 td = req->td_data; 951 /* gen. required number of descriptors and buffers */ 952 for (i = buf_len; i < bytes; i += buf_len) { 953 /* create or determine next desc. 
*/ 954 if (create_new_chain) { 955 956 td = pci_pool_alloc(ep->dev->data_requests, 957 gfp_flags, &dma_addr); 958 if (!td) 959 return -ENOMEM; 960 961 td->status = 0; 962 } else if (i == buf_len) { 963 /* first td */ 964 td = (struct udc_data_dma *) phys_to_virt( 965 req->td_data->next); 966 td->status = 0; 967 } else { 968 td = (struct udc_data_dma *) phys_to_virt(last->next); 969 td->status = 0; 970 } 971 972 973 if (td) 974 td->bufptr = req->req.dma + i; /* assign buffer */ 975 else 976 break; 977 978 /* short packet ? */ 979 if ((bytes - i) >= buf_len) { 980 txbytes = buf_len; 981 } else { 982 /* short packet */ 983 txbytes = bytes - i; 984 } 985 986 /* link td and assign tx bytes */ 987 if (i == buf_len) { 988 if (create_new_chain) 989 req->td_data->next = dma_addr; 990 /* 991 else 992 req->td_data->next = virt_to_phys(td); 993 */ 994 /* write tx bytes */ 995 if (ep->in) { 996 /* first desc */ 997 req->td_data->status = 998 AMD_ADDBITS(req->td_data->status, 999 ep->ep.maxpacket, 1000 UDC_DMA_IN_STS_TXBYTES); 1001 /* second desc */ 1002 td->status = AMD_ADDBITS(td->status, 1003 txbytes, 1004 UDC_DMA_IN_STS_TXBYTES); 1005 } 1006 } else { 1007 if (create_new_chain) 1008 last->next = dma_addr; 1009 /* 1010 else 1011 last->next = virt_to_phys(td); 1012 */ 1013 if (ep->in) { 1014 /* write tx bytes */ 1015 td->status = AMD_ADDBITS(td->status, 1016 txbytes, 1017 UDC_DMA_IN_STS_TXBYTES); 1018 } 1019 } 1020 last = td; 1021 } 1022 /* set last bit */ 1023 if (td) { 1024 td->status |= AMD_BIT(UDC_DMA_IN_STS_L); 1025 /* last desc. 
points to itself */
		req->td_data_last = td;
	}

	return 0;
}

/*
 * Enabling RX DMA: cancel a pending RDE timer (force it to expire with
 * set_rde cleared) and then set the RDE bit in the device control
 * register so receive DMA may run.
 */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/*
 * Queues a request packet, called by gadget driver.
 * @usbep:  endpoint to queue on
 * @usbreq: request; must carry a completion callback and a buffer and
 *          must not already be linked into a queue
 * @gfp:    allocation flags passed through to prep_dma()
 *
 * Returns 0 on success, -EINVAL for bad/unconfigured input,
 * -ESHUTDOWN when no gadget driver is bound or speed is unknown,
 * otherwise the error from DMA mapping or descriptor preparation.
 * Takes dev->lock for the queue manipulation and register writes.
 */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int retval = 0;
	u8 open_rxfifo = 0;
	unsigned long iflags;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc *dev;
	u32 tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	/* ep0 (both directions) is usable even without a descriptor */
	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		/* NOTE: uses the global 'udc' singleton, same device as 'dev' */
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes afer nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an
endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	/* mark halted so completion callbacks do not requeue */
	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		/* complete (and unlink) every request with -ESHUTDOWN */
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/*
 * Dequeues a request packet, called by gadget driver.
 * Completes @usbreq with -ECONNRESET; if the request is at the head of
 * the queue and its DMA is in flight, either flags the transfer for
 * cancellation in the ISR or (OUT, descriptor still HOST_READY)
 * substitutes the BNA dummy descriptor while RX DMA is briefly masked.
 * Returns 0, or -EINVAL on bad/unconfigured endpoint.
 */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned halted;
	unsigned long iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	/* temporarily halt the ep; restored below */
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					/* desc untouched: park BNA dummy */
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				/* restore previous RDE state */
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/*
 * Halt or clear halt of endpoint.
 * @halt: nonzero sets STALL (ep0 IN stalls on next status stage),
 *        zero clears a previously set STALL and un-NAKs the ep.
 * For non-ep0 STALL, arms udc_pollstall_timer so the S bit can be
 * re-examined (see udc_handle_halt_state).
 */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep *ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into acount
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface: endpoint operations exposed to the gadget stack */
static const struct usb_ep_ops udc_ep_ops = {
	.enable = udc_ep_enable,
	.disable = udc_ep_disable,

	.alloc_request = udc_alloc_request,
	.free_request = udc_free_request,

	.queue = udc_queue,
	.dequeue = udc_dequeue,

	.set_halt = udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface; triggers a resume on the bus */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int amd5536_stop(struct usb_gadget_driver *driver);
/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
	.wakeup = udc_wakeup,
	.get_frame = udc_get_frame,
	.start = amd5536_start,
	.stop = amd5536_stop,
};

/*
 * Setups endpoint parameters, adds endpoints to linked list.
 * Only the status-IN, data-IN and data-OUT endpoints are exposed on
 * gadget.ep_list; fifo depths depend on the enumerated speed.
 */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/*
 * init registers at driver load time: soft reset, interrupt masking,
 * basic config, endpoint setup and speed programming.  Always returns 0.
 */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/*
 * Inits UDC context: stops both driver timers, disables RX/TX DMA and
 * programs device config (dynamic CSR, self powered, remote wakeup),
 * then rebuilds the gadget endpoint lists.
 */
static void udc_basic_init(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}

/*
Sets initial endpoint parameters: reads the enumerated speed from the
 * device status register and (re)initializes every endpoint's name,
 * fifo depth, direction and register pointers.  Endpoints already
 * enabled by the gadget driver are not reset. */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_string[tmp];
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size; first UDC_EPIN_NUM indices are the IN eps */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;

		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;

			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
						UDC_FS_EP0OUT_MAX_PKT_SIZE;
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}

/*
 * Tasklet for disconnect to be outside of interrupt context.
 * @par: pointer to a (struct udc *) cell holding the device.
 * Notifies the gadget driver (with dev->lock dropped across the
 * callback), empties all endpoint queues, resets ep0 and the
 * controller, then re-enables setup interrupts.
 */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		/* drop lock around the gadget callback */
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);

	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);


	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}

/*
 * Reset the UDC core.  Interrupt status registers are cleared first
 * because pending status is lost across the soft reset.
 */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	/* posted-write flush */
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);

}

/*
 * RDE timer callback to set RDE bit.
 * set_rde protocol: >1 means "open fifo now"; 1 means "check fifo";
 * 0/-1 means RDE was (or will be) handled by udc_queue().
 */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets gets queued by
			 * gadget layer then timer will forced to expire with
			 * set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);

}

/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* set stall as long not halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ?
		 */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receivng of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}

/*
 * Stall timer callback to poll S bit and set it again after.
 * Only the single data IN and data OUT endpoints are polled; the
 * timer re-arms itself while either remains halted.
 */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and OUT endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}

/*
 * Inits endpoint 0 so that SETUP packets are processed: flushes the
 * ep0-IN fifo, programs fifo/max-packet sizes for the enumerated
 * speed, optionally enables DMA, and clears NAK on both ep0
 * directions.
 */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}

/* Make endpoint 0 ready for control traffic */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}

/*
 * Called by gadget driver to register itself.
 * Binds @driver via @bind, prepares ep0 and connects to the host by
 * clearing the soft-disconnect (SD) bit.  Returns 0 on success,
 * -EINVAL for bad arguments or a driver limited to less than high
 * speed, -ENODEV when no controller exists, -EBUSY when a driver is
 * already bound, or the error returned by @bind.
 */
static int amd5536_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct udc *dev = udc;
	int retval;
	u32 tmp;

	if (!driver || !bind || !driver->setup
			|| driver->max_speed < USB_SPEED_HIGH)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	retval = bind(&dev->gadget);

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	if (retval) {
		DBG(dev, "binding to %s returning %d\n",
				driver->driver.name, retval);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD (soft disconnect) so the host sees the device */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}

/*
 * shutdown requests and disconnect from gadget.
 * Caller holds dev->lock; it is dropped around the gadget driver's
 * disconnect() callback (see __releases/__acquires annotations).
 */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	/* empty queues and init hardware */
	udc_basic_init(dev);
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}

/*
 * Called by gadget driver to unregister itself.
 * Masks interrupts, shuts down all endpoints, unbinds the driver and
 * soft-disconnects from the host by setting the SD bit.
 */
static int amd5536_stop(struct usb_gadget_driver *driver)
{
	struct udc *dev = udc;
	unsigned long flags;
	u32 tmp;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);


	DBG(dev, "%s: unregistered\n", driver->driver.name);

	return 0;
}

/*
 * Clear pending NAK bits for every endpoint flagged in the global
 * cnak_pending bitmask (IN endpoints and ep0-OUT).
 */
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 tmp;
	u32 reg;

	/* check epin's */
	DBG(dev, "CNAK pending queue processing\n");
	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
		if (cnak_pending & (1 << tmp)) {
			DBG(dev, "CNAK pending for ep%d\n", tmp);
			/* clear NAK by writing CNAK */
			reg = readl(&dev->ep[tmp].regs->ctl);
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ...  and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}

/*
 * Enabling RX DMA after setup packet: immediate when no data endpoint
 * is enabled (or data is already queued), otherwise deferred via the
 * RDE timer so data-endpoint RX DMA is not opened too early.
 */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints to early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
	}
}


/* Interrupt handler for data OUT traffic */
static irqreturn_t udc_data_out_isr(struct
udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned int count;
	struct udc_data_dma *td = NULL;
	unsigned dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	/* runs under dev->lock taken by udc_irq() */
	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? (buffer not available for RX DMA) */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? (host error) */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {

		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		/* data arrived with no request queued: remember for later */
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {

		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* received number bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* received number bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			/* clamp to remaining buffer space; short-packet
			 * overflow into a full packet boundary is an error */
			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may be already started by udc_queue()
				 * called by gadget drivers completion
				 * routine. This happens when queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer)
						add_timer(&udc_timer);
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be reenabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}

/*
 * Interrupt handler for data IN traffic.
 * Runs under dev->lock taken by udc_irq(); handles BNA/HE errors, TDC
 * (DMA transfer done) completion and the IN-token path for FIFO mode.
 */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned dma_done;
	unsigned len;

	ep = &dev->ep[ep_ix];

	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(&dev->pdev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/*
			 * clear BNA
			 * NOTE(review): writes back the whole epsts value,
			 * so every status bit that was set is cleared here,
			 * not only BNA — confirm this is intended
			 */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? (host error) */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev,
			"HE ep%dn occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set- completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td) {
					dma_done =
						AMD_GETBITS(td->status,
						UDC_DMA_IN_STS_BS);
					/* don't care DMA done */
					req->req.actual = req->req.length;
				}
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}

			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further request available ? */
				if (list_empty(&ep->queue)) {
					/* disable interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;

	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				/* done when all bytes sent or short packet */
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {

					req->dma_going = 1;

					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		} else if (!use_dma && ep->in) {
			/* no request queued: disable interrupt */
			tmp = readl(
				&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp,
				&dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;

}

/*
 * Interrupt handler for Control OUT traffic.
 * Drops and reacquires dev->lock around the gadget driver's setup()
 * callback (see __releases/__acquires annotations).
 */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int
setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep *ep;
	struct udc_ep *ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {

			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			/* setup packet was written by HW into the setup
			 * descriptor; copy both dwords out of it */
			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
				UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data to early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
							HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}

		/*
		 * mass storage reset must be processed here because
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and OUT endpoints are handled
			 * NOTE(review): uses the global 'udc' instead of
			 * 'dev' — equivalent for this single-instance
			 * driver (probe rejects a second controller),
			 * but inconsistent with the rest of the function
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}

		/* call gadget with setup data received */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);

		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;


		/* clear NAK by writing CNAK in EP0_OUT */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}

		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}

	/* data packet 0 bytes */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);

		/* get setup data: only 0 packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");

				/* set HOST READY */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);
				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;

			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {

			/* received number bytes */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/*
			 * out data for fifo mode not working
			 * NOTE(review): count is deliberately forced to 0,
			 * so the "real data" branch below is dead code in
			 * fifo mode
			 */
			count = 0;

			/* 0 packet or real data ? */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

finished:
	return ret_val;
}

/* Interrupt handler for Control IN traffic */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;

	ep = &dev->ep[UDC_EP0IN_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear\n");
		ret_val = IRQ_HANDLED;

		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
			&dev->ep[UDC_EP0IN_IX].regs->sts);

	/* status reg has IN bit set ?
	 */
	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
		ret_val = IRQ_HANDLED;

		if (ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
		if (dev->stall_ep0in) {
			DBG(dev, "stall ep0in\n");
			/* halt ep0in */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		} else {
			if (!list_empty(&ep->queue)) {
				/* next request */
				req = list_entry(ep->queue.next,
						struct udc_request, queue);

				if (ep->dma) {
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_STP_STS_BS_HOST_READY,
						UDC_DMA_STP_STS_BS);

					/* set poll demand bit */
					tmp =
					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp,
					&dev->ep[UDC_EP0IN_IX].regs->ctl);

					/* all bytes will be transferred */
					req->req.actual = req->req.length;

					/* complete req */
					complete_req(ep, req, 0);

				} else {
					/* write fifo */
					udc_txfifo_write(ep, &req->req);

					/* lengh bytes transferred */
					len = req->req.length - req->req.actual;
					if (len > ep->ep.maxpacket)
						len = ep->ep.maxpacket;

					req->req.actual += len;
					/* done on full transfer or short pkt */
					if (req->req.actual == req->req.length
						|| (len != ep->ep.maxpacket)) {
						/* complete req */
						complete_req(ep, req, 0);
					}
				}

			}
		}
		ep->halted = 0;
		dev->stall_ep0in = 0;
		if (!ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
	}

	return ret_val;
}


/*
 * Interrupt handler for global device events (SET_CONFIG, SET_INTERFACE,
 * USB reset, suspend, enumeration done, session valid change).
 * Drops and reacquires dev->lock around gadget driver callbacks.
 */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = cpu_to_le16(dev->cur_config);

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {

				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;


			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);

		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
				dev->cur_alt, dev->cur_intf);

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {

				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;


			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ?
 */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints */
		udc_setup_endpoints(dev);
		dev_info(&dev->pdev->dev, "Connect: %s\n",
			 usb_speed_string(dev->gadget.speed));

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}

	}

	return ret_val;
}

/*
 * Interrupt Service Routine, see Linux Kernel Doc for parameters.
 * Dispatches endpoint interrupts (ep0 control in/out, data in/out) and
 * then device-level interrupts, all under dev->lock.
 */
static irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/*
		 * data endpoint
		 * iterate ep's
		 */
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* irq for out ep ? */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}

	}


	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}


	spin_unlock(&dev->lock);
	return ret_val;
}

/* Tears down device: release callback of the gadget struct device */
static void gadget_release(struct device *pdev)
{
	struct amd5536udc *dev = dev_get_drvdata(pdev);
	kfree(dev);
}

/*
 * Cleanup on device remove.
 * Stops both driver timers (waiting for a pending run to signal its
 * completion first; .data doubles as the "timer was initialized" flag,
 * see udc_probe()) and clears the global udc pointer.
 */
static void udc_remove(struct udc *dev)
{
	/* remove timer */
	stop_timer++;
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	if (udc_timer.data)
		del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	if (udc_pollstall_timer.data)
		del_timer_sync(&udc_pollstall_timer);
	udc = NULL;
}

/*
 * Reset all pci context: undoes udc_pci_probe() in reverse order,
 * guarded by the dev->active / mem_region / irq_registered flags so it
 * is also safe as the error-unwind path of probe.
 */
static void udc_pci_remove(struct pci_dev *pdev)
{
	struct udc		*dev;

	dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);
	/* gadget driver must not be registered */
	BUG_ON(dev->driver != NULL);

	/* dma pool cleanup */
	if (dev->data_requests)
		pci_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0in */
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td_stp,
			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td,
			dev->ep[UDC_EP0OUT_IX].td_phys);

		pci_pool_destroy(dev->stp_requests);
	}

	/* reset controller */
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	if (dev->irq_registered)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->mem_region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (dev->active)
		pci_disable_device(pdev);

	/* triggers gadget_release(), which frees dev */
	device_unregister(&dev->gadget.dev);
	pci_set_drvdata(pdev, NULL);

	udc_remove(dev);
}

/*
 * create dma pools on init.
 * Allocates the descriptor pools for data and setup transfers and the
 * ep0out setup/data descriptors.  Returns 0 or -ENOMEM; partially
 * allocated pools are left for udc_pci_remove() to free.
 */
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma	*td_stp;
	struct udc_data_dma	*td_data;
	int retval;

	/* consistent DMA mode setting ? */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		retval = -ENOMEM;
		goto finished;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto finished;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !? */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (td_data == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

finished:
	return retval;
}

/*
 * Called by pci bus driver to init pci context.
 *
 * NOTE(review): several early error paths below do kfree(dev) and NULL
 * the pointer before "goto finished", which skips the udc_pci_remove()
 * unwind — e.g. on request_mem_region() failure the enabled PCI device
 * is never disabled, and on ioremap/irq failures the mem region and
 * mapping leak.  Either unwind what was acquired before freeing dev, or
 * leave dev alive so udc_pci_remove() (flag-guarded) can clean up.
 */
static int udc_pci_probe(
	struct pci_dev *pdev,
	const struct pci_device_id *id
)
{
	struct udc		*dev;
	unsigned long		resource;
	unsigned long		len;
	int			retval = 0;

	/* one udc only */
	if (udc) {
		dev_dbg(&pdev->dev, "already probed\n");
		return -EBUSY;
	}

	/* init */
	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto finished;
	}

	/* pci setup */
	if (pci_enable_device(pdev) < 0) {
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}
	dev->active = 1;

	/* PCI resource allocation */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	if (!request_mem_region(resource, len, name)) {
		dev_dbg(&pdev->dev, "pci device used already\n");
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->mem_region = 1;

	dev->virt_addr = ioremap_nocache(resource, len);
	if (dev->virt_addr == NULL) {
		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
		kfree(dev);
		dev = NULL;
		retval = -EFAULT;
		goto finished;
	}

	if (!pdev->irq) {
		dev_err(&dev->pdev->dev, "irq not set\n");
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}

	spin_lock_init(&dev->lock);
	/* udc csr registers base */
	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
	/* dev registers base */
	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
	/* ep registers base */
	dev->ep_regs = dev->virt_addr +
			UDC_EPREGS_ADDR;
	/* fifo's base */
	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);

	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
		dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->irq_registered = 1;

	pci_set_drvdata(pdev, dev);

	/* chip revision for Hs AMD5536 */
	dev->chiprev = pdev->revision;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* init dma pools */
	if (use_dma) {
		retval = init_dma_pools(dev);
		if (retval != 0)
			goto finished;
	}

	dev->phys_addr = resource;
	dev->irq = pdev->irq;
	dev->pdev = pdev;
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;

	/* general probing */
	if (udc_probe(dev) == 0)
		return 0;

finished:
	/* unwind via udc_pci_remove() only when dev is still allocated */
	if (dev)
		udc_pci_remove(pdev);
	return retval;
}

/*
 * general probe: gadget/device registration, timers, register dump.
 * Returns 0 on success or a negative errno; on failure the caller
 * (udc_pci_probe) performs the unwind.
 */
static int udc_probe(struct udc *dev)
{
	char		tmp[128];
	u32		reg;
	int		retval;

	/* mark timer as not initialized (.data is the init flag,
	 * checked by udc_remove()) */
	udc_timer.data = 0;
	udc_pollstall_timer.data = 0;

	/* device struct setup */
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = name;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(&dev->pdev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof tmp, "%d", dev->irq);
	dev_info(&dev->pdev->dev,
		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
		tmp, dev->phys_addr, dev->chiprev,
		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
	if (dev->chiprev == UDC_HSA0_REV) {
		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
		retval = -ENODEV;
		goto finished;
	}
	dev_info(&dev->pdev->dev,
		"driver version: %s(for Geode5536 B1)\n", tmp);
	udc = dev;

	retval = usb_add_gadget_udc(&udc->pdev->dev, &dev->gadget);
	if (retval)
		goto finished;

	retval = device_register(&dev->gadget.dev);
	if (retval) {
		usb_del_gadget_udc(&dev->gadget);
		put_device(&dev->gadget.dev);
		goto finished;
	}

	/* timer init */
	init_timer(&udc_timer);
	udc_timer.function = udc_timer_function;
	udc_timer.data = 1;
	/* timer pollstall init */
	init_timer(&udc_pollstall_timer);
	udc_pollstall_timer.function = udc_pollstall_timer_function;
	udc_pollstall_timer.data = 1;

	/* set SD (soft disconnect until a gadget driver binds) */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}

/*
 * Initiates a remote wakeup: pulses the RES (resume) bit in the device
 * control register under the device lock.  Always returns 0.
 */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* PCI device parameters */
static DEFINE_PCI_DEVICE_TABLE(pci_id) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask =	0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);

/* PCI functions */
static struct pci_driver udc_pci_driver = {
	.name =		(char *) name,
	.id_table =	pci_id,
	.probe =	udc_pci_probe,
	.remove =	udc_pci_remove,
};

/* Inits driver */
static int __init init(void)
{
	return pci_register_driver(&udc_pci_driver);
}
module_init(init);

/* Cleans driver */
static void __exit cleanup(void)
{
	pci_unregister_driver(&udc_pci_driver);
}
module_exit(cleanup);

MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");