Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.32 1555 lines 45 kB view raw
1/* 2 * Copyright (C) 2005-2006 by Texas Instruments 3 * 4 * This file implements a DMA interface using TI's CPPI DMA. 5 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB. 6 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci. 7 */ 8 9#include <linux/platform_device.h> 10#include <linux/usb.h> 11 12#include "musb_core.h" 13#include "musb_debug.h" 14#include "cppi_dma.h" 15 16 17/* CPPI DMA status 7-mar-2006: 18 * 19 * - See musb_{host,gadget}.c for more info 20 * 21 * - Correct RX DMA generally forces the engine into irq-per-packet mode, 22 * which can easily saturate the CPU under non-mass-storage loads. 23 * 24 * NOTES 24-aug-2006 (2.6.18-rc4): 25 * 26 * - peripheral RXDMA wedged in a test with packets of length 512/512/1. 27 * evidently after the 1 byte packet was received and acked, the queue 28 * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003, 29 * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401 30 * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx 31 * of its next (512 byte) packet. IRQ issues? 32 * 33 * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will 34 * evidently also directly update the RX and TX CSRs ... so audit all 35 * host and peripheral side DMA code to avoid CSR access after DMA has 36 * been started. 37 */ 38 39/* REVISIT now we can avoid preallocating these descriptors; or 40 * more simply, switch to a global freelist not per-channel ones. 41 * Note: at full speed, 64 descriptors == 4K bulk data. 42 */ 43#define NUM_TXCHAN_BD 64 44#define NUM_RXCHAN_BD 64 45 46static inline void cpu_drain_writebuffer(void) 47{ 48 wmb(); 49#ifdef CONFIG_CPU_ARM926T 50 /* REVISIT this "should not be needed", 51 * but lack of it sure seemed to hurt ... 52 */ 53 asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n"); 54#endif 55} 56 57static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) 58{ 59 struct cppi_descriptor *bd = c->freelist; 60 61 if (bd) 62 c->freelist = bd->next; 63 return bd; 64} 65 66static inline void 67cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) 68{ 69 if (!bd) 70 return; 71 bd->next = c->freelist; 72 c->freelist = bd; 73} 74 75/* 76 * Start DMA controller 77 * 78 * Initialize the DMA controller as necessary. 79 */ 80 81/* zero out entire rx state RAM entry for the channel */ 82static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx) 83{ 84 musb_writel(&rx->rx_skipbytes, 0, 0); 85 musb_writel(&rx->rx_head, 0, 0); 86 musb_writel(&rx->rx_sop, 0, 0); 87 musb_writel(&rx->rx_current, 0, 0); 88 musb_writel(&rx->rx_buf_current, 0, 0); 89 musb_writel(&rx->rx_len_len, 0, 0); 90 musb_writel(&rx->rx_cnt_cnt, 0, 0); 91} 92 93/* zero out entire tx state RAM entry for the channel */ 94static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr) 95{ 96 musb_writel(&tx->tx_head, 0, 0); 97 musb_writel(&tx->tx_buf, 0, 0); 98 musb_writel(&tx->tx_current, 0, 0); 99 musb_writel(&tx->tx_buf_current, 0, 0); 100 musb_writel(&tx->tx_info, 0, 0); 101 musb_writel(&tx->tx_rem_len, 0, 0); 102 /* musb_writel(&tx->tx_dummy, 0, 0); */ 103 musb_writel(&tx->tx_complete, 0, ptr); 104} 105 106static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) 107{ 108 int j; 109 110 /* initialize channel fields */ 111 c->head = NULL; 112 c->tail = NULL; 113 c->last_processed = NULL; 114 c->channel.status = MUSB_DMA_STATUS_UNKNOWN; 115 c->controller = cppi; 116 c->is_rndis = 0; 117 c->freelist = NULL; 118 119 /* build the BD Free list for the channel */ 120 for (j = 0; j < NUM_TXCHAN_BD + 1; j++) { 121 struct cppi_descriptor *bd; 122 dma_addr_t dma; 123 124 bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); 125 bd->dma = dma; 126 cppi_bd_free(c, bd); 127 } 128} 129 130static int cppi_channel_abort(struct dma_channel *); 131 132static void cppi_pool_free(struct cppi_channel *c) 133{ 134 struct cppi *cppi = c->controller; 135 struct cppi_descriptor *bd; 136 137 (void) cppi_channel_abort(&c->channel); 138 c->channel.status = MUSB_DMA_STATUS_UNKNOWN; 139 c->controller = NULL; 140 141 /* free all its bds */ 142 bd = c->last_processed; 143 do { 144 if (bd) 145 dma_pool_free(cppi->pool, bd, bd->dma); 146 bd = cppi_bd_alloc(c); 147 } while (bd); 148 c->last_processed = NULL; 149} 150 151static int __init cppi_controller_start(struct dma_controller *c) 152{ 153 struct cppi *controller; 154 void __iomem *tibase; 155 int i; 156 157 controller = container_of(c, struct cppi, controller); 158 159 /* do whatever is necessary to start controller */ 160 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { 161 controller->tx[i].transmit = true; 162 controller->tx[i].index = i; 163 } 164 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { 165 controller->rx[i].transmit = false; 166 controller->rx[i].index = i; 167 } 168 169 /* setup BD list on a per channel basis */ 170 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) 171 cppi_pool_init(controller, controller->tx + i); 172 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) 173 cppi_pool_init(controller, controller->rx + i); 174 175 tibase = controller->tibase; 176 INIT_LIST_HEAD(&controller->tx_complete); 177 178 /* initialise tx/rx channel head pointers to zero */ 179 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { 180 struct cppi_channel *tx_ch = controller->tx + i; 181 struct cppi_tx_stateram __iomem *tx; 182 183 INIT_LIST_HEAD(&tx_ch->tx_complete); 184 185 tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); 186 tx_ch->state_ram = tx; 187 cppi_reset_tx(tx, 0); 188 } 189 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { 190 struct cppi_channel *rx_ch = controller->rx + i; 191 struct cppi_rx_stateram __iomem *rx; 192 193 INIT_LIST_HEAD(&rx_ch->tx_complete); 194 195 rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i); 196 rx_ch->state_ram = rx; 197 cppi_reset_rx(rx); 198 } 199 200 /* enable individual cppi channels */ 201 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, 202 DAVINCI_DMA_ALL_CHANNELS_ENABLE); 203 musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG, 204 DAVINCI_DMA_ALL_CHANNELS_ENABLE); 205 206 /* enable tx/rx CPPI control */ 207 musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); 208 musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); 209 210 /* disable RNDIS mode, also host rx RNDIS autorequest */ 211 musb_writel(tibase, DAVINCI_RNDIS_REG, 0); 212 musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0); 213 214 return 0; 215} 216 217/* 218 * Stop DMA controller 219 * 220 * De-Init the DMA controller as necessary. 221 */ 222 223static int cppi_controller_stop(struct dma_controller *c) 224{ 225 struct cppi *controller; 226 void __iomem *tibase; 227 int i; 228 229 controller = container_of(c, struct cppi, controller); 230 231 tibase = controller->tibase; 232 /* DISABLE INDIVIDUAL CHANNEL Interrupts */ 233 musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, 234 DAVINCI_DMA_ALL_CHANNELS_ENABLE); 235 musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, 236 DAVINCI_DMA_ALL_CHANNELS_ENABLE); 237 238 DBG(1, "Tearing down RX and TX Channels\n"); 239 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { 240 /* FIXME restructure of txdma to use bds like rxdma */ 241 controller->tx[i].last_processed = NULL; 242 cppi_pool_free(controller->tx + i); 243 } 244 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) 245 cppi_pool_free(controller->rx + i); 246 247 /* in Tx Case proper teardown is supported. We resort to disabling 248 * Tx/Rx CPPI after cleanup of Tx channels. Before TX teardown is 249 * complete TX CPPI cannot be disabled. 250 */ 251 /*disable tx/rx cppi */ 252 musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); 253 musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); 254 255 return 0; 256} 257 258/* While dma channel is allocated, we only want the core irqs active 259 * for fault reports, otherwise we'd get irqs that we don't care about. 260 * Except for TX irqs, where dma done != fifo empty and reusable ... 261 * 262 * NOTE: docs don't say either way, but irq masking **enables** irqs. 263 * 264 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma... 265 */ 266static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum) 267{ 268 musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8)); 269} 270 271static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum) 272{ 273 musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8)); 274} 275 276 277/* 278 * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to 279 * each transfer direction of a non-control endpoint, so allocating 280 * (and deallocating) is mostly a way to notice bad housekeeping on 281 * the software side. We assume the irqs are always active. 282 */ 283static struct dma_channel * 284cppi_channel_allocate(struct dma_controller *c, 285 struct musb_hw_ep *ep, u8 transmit) 286{ 287 struct cppi *controller; 288 u8 index; 289 struct cppi_channel *cppi_ch; 290 void __iomem *tibase; 291 292 controller = container_of(c, struct cppi, controller); 293 tibase = controller->tibase; 294 295 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ 296 index = ep->epnum - 1; 297 298 /* return the corresponding CPPI Channel Handle, and 299 * probably disable the non-CPPI irq until we need it. 300 */ 301 if (transmit) { 302 if (index >= ARRAY_SIZE(controller->tx)) { 303 DBG(1, "no %cX%d CPPI channel\n", 'T', index); 304 return NULL; 305 } 306 cppi_ch = controller->tx + index; 307 } else { 308 if (index >= ARRAY_SIZE(controller->rx)) { 309 DBG(1, "no %cX%d CPPI channel\n", 'R', index); 310 return NULL; 311 } 312 cppi_ch = controller->rx + index; 313 core_rxirq_disable(tibase, ep->epnum); 314 } 315 316 /* REVISIT make this an error later once the same driver code works 317 * with the other DMA engine too 318 */ 319 if (cppi_ch->hw_ep) 320 DBG(1, "re-allocating DMA%d %cX channel %p\n", 321 index, transmit ? 'T' : 'R', cppi_ch); 322 cppi_ch->hw_ep = ep; 323 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; 324 325 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); 326 return &cppi_ch->channel; 327} 328 329/* Release a CPPI Channel. */ 330static void cppi_channel_release(struct dma_channel *channel) 331{ 332 struct cppi_channel *c; 333 void __iomem *tibase; 334 335 /* REVISIT: for paranoia, check state and abort if needed... */ 336 337 c = container_of(channel, struct cppi_channel, channel); 338 tibase = c->controller->tibase; 339 if (!c->hw_ep) 340 DBG(1, "releasing idle DMA channel %p\n", c); 341 else if (!c->transmit) 342 core_rxirq_enable(tibase, c->index + 1); 343 344 /* for now, leave its cppi IRQ enabled (we won't trigger it) */ 345 c->hw_ep = NULL; 346 channel->status = MUSB_DMA_STATUS_UNKNOWN; 347} 348 349/* Context: controller irqlocked */ 350static void 351cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) 352{ 353 void __iomem *base = c->controller->mregs; 354 struct cppi_rx_stateram __iomem *rx = c->state_ram; 355 356 musb_ep_select(base, c->index + 1); 357 358 DBG(level, "RX DMA%d%s: %d left, csr %04x, " 359 "%08x H%08x S%08x C%08x, " 360 "B%08x L%08x %08x .. %08x" 361 "\n", 362 c->index, tag, 363 musb_readl(c->controller->tibase, 364 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), 365 musb_readw(c->hw_ep->regs, MUSB_RXCSR), 366 367 musb_readl(&rx->rx_skipbytes, 0), 368 musb_readl(&rx->rx_head, 0), 369 musb_readl(&rx->rx_sop, 0), 370 musb_readl(&rx->rx_current, 0), 371 372 musb_readl(&rx->rx_buf_current, 0), 373 musb_readl(&rx->rx_len_len, 0), 374 musb_readl(&rx->rx_cnt_cnt, 0), 375 musb_readl(&rx->rx_complete, 0) 376 ); 377} 378 379/* Context: controller irqlocked */ 380static void 381cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) 382{ 383 void __iomem *base = c->controller->mregs; 384 struct cppi_tx_stateram __iomem *tx = c->state_ram; 385 386 musb_ep_select(base, c->index + 1); 387 388 DBG(level, "TX DMA%d%s: csr %04x, " 389 "H%08x S%08x C%08x %08x, " 390 "F%08x L%08x .. %08x" 391 "\n", 392 c->index, tag, 393 musb_readw(c->hw_ep->regs, MUSB_TXCSR), 394 395 musb_readl(&tx->tx_head, 0), 396 musb_readl(&tx->tx_buf, 0), 397 musb_readl(&tx->tx_current, 0), 398 musb_readl(&tx->tx_buf_current, 0), 399 400 musb_readl(&tx->tx_info, 0), 401 musb_readl(&tx->tx_rem_len, 0), 402 /* dummy/unused word 6 */ 403 musb_readl(&tx->tx_complete, 0) 404 ); 405} 406 407/* Context: controller irqlocked */ 408static inline void 409cppi_rndis_update(struct cppi_channel *c, int is_rx, 410 void __iomem *tibase, int is_rndis) 411{ 412 /* we may need to change the rndis flag for this cppi channel */ 413 if (c->is_rndis != is_rndis) { 414 u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG); 415 u32 temp = 1 << (c->index); 416 417 if (is_rx) 418 temp <<= 16; 419 if (is_rndis) 420 value |= temp; 421 else 422 value &= ~temp; 423 musb_writel(tibase, DAVINCI_RNDIS_REG, value); 424 c->is_rndis = is_rndis; 425 } 426} 427 428#ifdef CONFIG_USB_MUSB_DEBUG 429static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) 430{ 431 pr_debug("RXBD/%s %08x: " 432 "nxt %08x buf %08x off.blen %08x opt.plen %08x\n", 433 tag, bd->dma, 434 bd->hw_next, bd->hw_bufp, bd->hw_off_len, 435 bd->hw_options); 436} 437#endif 438 439static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) 440{ 441#ifdef CONFIG_USB_MUSB_DEBUG 442 struct cppi_descriptor *bd; 443 444 if (!_dbg_level(level)) 445 return; 446 cppi_dump_rx(level, rx, tag); 447 if (rx->last_processed) 448 cppi_dump_rxbd("last", rx->last_processed); 449 for (bd = rx->head; bd; bd = bd->next) 450 cppi_dump_rxbd("active", bd); 451#endif 452} 453 454 455/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX; 456 * so we won't ever use it (see "CPPI RX Woes" below). 457 */ 458static inline int cppi_autoreq_update(struct cppi_channel *rx, 459 void __iomem *tibase, int onepacket, unsigned n_bds) 460{ 461 u32 val; 462 463#ifdef RNDIS_RX_IS_USABLE 464 u32 tmp; 465 /* assert(is_host_active(musb)) */ 466 467 /* start from "AutoReq never" */ 468 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); 469 val = tmp & ~((0x3) << (rx->index * 2)); 470 471 /* HCD arranged reqpkt for packet #1. we arrange int 472 * for all but the last one, maybe in two segments. 473 */ 474 if (!onepacket) { 475#if 0 476 /* use two segments, autoreq "all" then the last "never" */ 477 val |= ((0x3) << (rx->index * 2)); 478 n_bds--; 479#else 480 /* one segment, autoreq "all-but-last" */ 481 val |= ((0x1) << (rx->index * 2)); 482#endif 483 } 484 485 if (val != tmp) { 486 int n = 100; 487 488 /* make sure that autoreq is updated before continuing */ 489 musb_writel(tibase, DAVINCI_AUTOREQ_REG, val); 490 do { 491 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); 492 if (tmp == val) 493 break; 494 cpu_relax(); 495 } while (n-- > 0); 496 } 497#endif 498 499 /* REQPKT is turned off after each segment */ 500 if (n_bds && rx->channel.actual_len) { 501 void __iomem *regs = rx->hw_ep->regs; 502 503 val = musb_readw(regs, MUSB_RXCSR); 504 if (!(val & MUSB_RXCSR_H_REQPKT)) { 505 val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS; 506 musb_writew(regs, MUSB_RXCSR, val); 507 /* flush writebufer */ 508 val = musb_readw(regs, MUSB_RXCSR); 509 } 510 } 511 return n_bds; 512} 513 514 515/* Buffer enqueuing Logic: 516 * 517 * - RX builds new queues each time, to help handle routine "early 518 * termination" cases (faults, including errors and short reads) 519 * more correctly. 520 * 521 * - for now, TX reuses the same queue of BDs every time 522 * 523 * REVISIT long term, we want a normal dynamic model. 524 * ... the goal will be to append to the 525 * existing queue, processing completed "dma buffers" (segments) on the fly. 526 * 527 * Otherwise we force an IRQ latency between requests, which slows us a lot 528 * (especially in "transparent" dma). Unfortunately that model seems to be 529 * inherent in the DMA model from the Mentor code, except in the rare case 530 * of transfers big enough (~128+ KB) that we could append "middle" segments 531 * in the TX paths. (RX can't do this, see below.) 532 * 533 * That's true even in the CPPI- friendly iso case, where most urbs have 534 * several small segments provided in a group and where the "packet at a time" 535 * "transparent" DMA model is always correct, even on the RX side. 536 */ 537 538/* 539 * CPPI TX: 540 * ======== 541 * TX is a lot more reasonable than RX; it doesn't need to run in 542 * irq-per-packet mode very often. RNDIS mode seems to behave too 543 * (except how it handles the exactly-N-packets case). Building a 544 * txdma queue with multiple requests (urb or usb_request) looks 545 * like it would work ... but fault handling would need much testing. 546 * 547 * The main issue with TX mode RNDIS relates to transfer lengths that 548 * are an exact multiple of the packet length. It appears that there's 549 * a hiccup in that case (maybe the DMA completes before the ZLP gets 550 * written?) boiling down to not being able to rely on CPPI writing any 551 * terminating zero length packet before the next transfer is written. 552 * So that's punted to PIO; better yet, gadget drivers can avoid it. 553 * 554 * Plus, there's allegedly an undocumented constraint that rndis transfer 555 * length be a multiple of 64 bytes ... but the chip doesn't act that 556 * way, and we really don't _want_ that behavior anyway. 557 * 558 * On TX, "transparent" mode works ... although experiments have shown 559 * problems trying to use the SOP/EOP bits in different USB packets. 560 * 561 * REVISIT try to handle terminating zero length packets using CPPI 562 * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet 563 * links avoid that issue by forcing them to avoid zlps.) 564 */ 565static void 566cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) 567{ 568 unsigned maxpacket = tx->maxpacket; 569 dma_addr_t addr = tx->buf_dma + tx->offset; 570 size_t length = tx->buf_len - tx->offset; 571 struct cppi_descriptor *bd; 572 unsigned n_bds; 573 unsigned i; 574 struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram; 575 int rndis; 576 577 /* TX can use the CPPI "rndis" mode, where we can probably fit this 578 * transfer in one BD and one IRQ. The only time we would NOT want 579 * to use it is when hardware constraints prevent it, or if we'd 580 * trigger the "send a ZLP?" confusion. 581 */ 582 rndis = (maxpacket & 0x3f) == 0 583 && length > maxpacket 584 && length < 0xffff 585 && (length % maxpacket) != 0; 586 587 if (rndis) { 588 maxpacket = length; 589 n_bds = 1; 590 } else { 591 n_bds = length / maxpacket; 592 if (!length || (length % maxpacket)) 593 n_bds++; 594 n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD); 595 length = min(n_bds * maxpacket, length); 596 } 597 598 DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", 599 tx->index, 600 maxpacket, 601 rndis ? "rndis" : "transparent", 602 n_bds, 603 addr, length); 604 605 cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); 606 607 /* assuming here that channel_program is called during 608 * transfer initiation ... current code maintains state 609 * for one outstanding request only (no queues, not even 610 * the implicit ones of an iso urb). 611 */ 612 613 bd = tx->freelist; 614 tx->head = bd; 615 tx->last_processed = NULL; 616 617 /* FIXME use BD pool like RX side does, and just queue 618 * the minimum number for this request. 619 */ 620 621 /* Prepare queue of BDs first, then hand it to hardware. 622 * All BDs except maybe the last should be of full packet 623 * size; for RNDIS there _is_ only that last packet. 624 */ 625 for (i = 0; i < n_bds; ) { 626 if (++i < n_bds && bd->next) 627 bd->hw_next = bd->next->dma; 628 else 629 bd->hw_next = 0; 630 631 bd->hw_bufp = tx->buf_dma + tx->offset; 632 633 /* FIXME set EOP only on the last packet, 634 * SOP only on the first ... avoid IRQs 635 */ 636 if ((tx->offset + maxpacket) <= tx->buf_len) { 637 tx->offset += maxpacket; 638 bd->hw_off_len = maxpacket; 639 bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET 640 | CPPI_OWN_SET | maxpacket; 641 } else { 642 /* only this one may be a partial USB Packet */ 643 u32 partial_len; 644 645 partial_len = tx->buf_len - tx->offset; 646 tx->offset = tx->buf_len; 647 bd->hw_off_len = partial_len; 648 649 bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET 650 | CPPI_OWN_SET | partial_len; 651 if (partial_len == 0) 652 bd->hw_options |= CPPI_ZERO_SET; 653 } 654 655 DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n", 656 bd, bd->hw_next, bd->hw_bufp, 657 bd->hw_off_len, bd->hw_options); 658 659 /* update the last BD enqueued to the list */ 660 tx->tail = bd; 661 bd = bd->next; 662 } 663 664 /* BDs live in DMA-coherent memory, but writes might be pending */ 665 cpu_drain_writebuffer(); 666 667 /* Write to the HeadPtr in state RAM to trigger */ 668 musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma); 669 670 cppi_dump_tx(5, tx, "/S"); 671} 672 673/* 674 * CPPI RX Woes: 675 * ============= 676 * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte 677 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back. 678 * (Full speed transfers have similar scenarios.) 679 * 680 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes, 681 * and the next packet goes into a buffer that's queued later; while (b) fills 682 * the buffer with 1024 bytes. How to do that with CPPI? 683 * 684 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but 685 * (b) loses **BADLY** because nothing (!) happens when that second packet 686 * fills the buffer, much less when a third one arrives. (Which makes this 687 * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination 688 * is optional, and it's fine if peripherals -- not hosts! -- pad messages 689 * out to end-of-buffer. Standard PCI host controller DMA descriptors 690 * implement that mode by default ... which is no accident.) 691 * 692 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have 693 * converse problems: (b) is handled right, but (a) loses badly. CPPI RX 694 * ignores SOP/EOP markings and processes both of those BDs; so both packets 695 * are loaded into the buffer (with a 212 byte gap between them), and the next 696 * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP 697 * are intended as outputs for RX queues, not inputs...) 698 * 699 * - A variant of "transparent" mode -- one BD at a time -- is the only way to 700 * reliably make both cases work, with software handling both cases correctly 701 * and at the significant penalty of needing an IRQ per packet. (The lack of 702 * I/O overlap can be slightly ameliorated by enabling double buffering.) 703 * 704 * So how to get rid of IRQ-per-packet? The transparent multi-BD case could 705 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK 706 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors 707 * with guaranteed driver level fault recovery and scrubbing out what's left 708 * of that garbaged datastream. 709 * 710 * But there seems to be no way to identify the cases where CPPI RNDIS mode 711 * is appropriate -- which do NOT include RNDIS host drivers, but do include 712 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong. 713 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic 714 * that applies best on the peripheral side (and which could fail rudely). 715 * 716 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all 717 * cases other than mass storage class. Otherwise we're correct but slow, 718 * since CPPI penalizes our need for a "true RNDIS" default mode. 719 */ 720 721 722/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY 723 * 724 * IFF 725 * (a) peripheral mode ... since rndis peripherals could pad their 726 * writes to hosts, causing i/o failure; or we'd have to cope with 727 * a largely unknowable variety of host side protocol variants 728 * (b) and short reads are NOT errors ... since full reads would 729 * cause those same i/o failures 730 * (c) and read length is 731 * - less than 64KB (max per cppi descriptor) 732 * - not a multiple of 4096 (g_zero default, full reads typical) 733 * - N (>1) packets long, ditto (full reads not EXPECTED) 734 * THEN 735 * try rx rndis mode 736 * 737 * Cost of heuristic failing: RXDMA wedges at the end of transfers that 738 * fill out the whole buffer. Buggy host side usb network drivers could 739 * trigger that, but "in the field" such bugs seem to be all but unknown. 740 * 741 * So this module parameter lets the heuristic be disabled. When using 742 * gadgetfs, the heuristic will probably need to be disabled. 743 */ 744static int cppi_rx_rndis = 1; 745 746module_param(cppi_rx_rndis, bool, 0); 747MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic"); 748 749 750/** 751 * cppi_next_rx_segment - dma read for the next chunk of a buffer 752 * @musb: the controller 753 * @rx: dma channel 754 * @onepacket: true unless caller treats short reads as errors, and 755 * performs fault recovery above usbcore. 756 * Context: controller irqlocked 757 * 758 * See above notes about why we can't use multi-BD RX queues except in 759 * rare cases (mass storage class), and can never use the hardware "rndis" 760 * mode (since it's not a "true" RNDIS mode) with complete safety.. 761 * 762 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in 763 * code to recover from corrupted datastreams after each short transfer. 764 */ 765static void 766cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) 767{ 768 unsigned maxpacket = rx->maxpacket; 769 dma_addr_t addr = rx->buf_dma + rx->offset; 770 size_t length = rx->buf_len - rx->offset; 771 struct cppi_descriptor *bd, *tail; 772 unsigned n_bds; 773 unsigned i; 774 void __iomem *tibase = musb->ctrl_base; 775 int is_rndis = 0; 776 struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram; 777 778 if (onepacket) { 779 /* almost every USB driver, host or peripheral side */ 780 n_bds = 1; 781 782 /* maybe apply the heuristic above */ 783 if (cppi_rx_rndis 784 && is_peripheral_active(musb) 785 && length > maxpacket 786 && (length & ~0xffff) == 0 787 && (length & 0x0fff) != 0 788 && (length & (maxpacket - 1)) == 0) { 789 maxpacket = length; 790 is_rndis = 1; 791 } 792 } else { 793 /* virtually nothing except mass storage class */ 794 if (length > 0xffff) { 795 n_bds = 0xffff / maxpacket; 796 length = n_bds * maxpacket; 797 } else { 798 n_bds = length / maxpacket; 799 if (length % maxpacket) 800 n_bds++; 801 } 802 if (n_bds == 1) 803 onepacket = 1; 804 else 805 n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD); 806 } 807 808 /* In host mode, autorequest logic can generate some IN tokens; it's 809 * tricky since we can't leave REQPKT set in RXCSR after the transfer 810 * finishes. So: multipacket transfers involve two or more segments. 811 * And always at least two IRQs ... RNDIS mode is not an option. 812 */ 813 if (is_host_active(musb)) 814 n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds); 815 816 cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis); 817 818 length = min(n_bds * maxpacket, length); 819 820 DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " 821 "dma 0x%x len %u %u/%u\n", 822 rx->index, maxpacket, 823 onepacket 824 ? (is_rndis ? "rndis" : "onepacket") 825 : "multipacket", 826 n_bds, 827 musb_readl(tibase, 828 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) 829 & 0xffff, 830 addr, length, rx->channel.actual_len, rx->buf_len); 831 832 /* only queue one segment at a time, since the hardware prevents 833 * correct queue shutdown after unexpected short packets 834 */ 835 bd = cppi_bd_alloc(rx); 836 rx->head = bd; 837 838 /* Build BDs for all packets in this segment */ 839 for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { 840 u32 bd_len; 841 842 if (i) { 843 bd = cppi_bd_alloc(rx); 844 if (!bd) 845 break; 846 tail->next = bd; 847 tail->hw_next = bd->dma; 848 } 849 bd->hw_next = 0; 850 851 /* all but the last packet will be maxpacket size */ 852 if (maxpacket < length) 853 bd_len = maxpacket; 854 else 855 bd_len = length; 856 857 bd->hw_bufp = addr; 858 addr += bd_len; 859 rx->offset += bd_len; 860 861 bd->hw_off_len = (0 /*offset*/ << 16) + bd_len; 862 bd->buflen = bd_len; 863 864 bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0); 865 length -= bd_len; 866 } 867 868 /* we always expect at least one reusable BD! */ 869 if (!tail) { 870 WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds); 871 return; 872 } else if (i < n_bds) 873 WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds); 874 875 tail->next = NULL; 876 tail->hw_next = 0; 877 878 bd = rx->head; 879 rx->tail = tail; 880 881 /* short reads and other faults should terminate this entire 882 * dma segment. we want one "dma packet" per dma segment, not 883 * one per USB packet, terminating the whole queue at once... 884 * NOTE that current hardware seems to ignore SOP and EOP. 885 */ 886 bd->hw_options |= CPPI_SOP_SET; 887 tail->hw_options |= CPPI_EOP_SET; 888 889#ifdef CONFIG_USB_MUSB_DEBUG 890 if (_dbg_level(5)) { 891 struct cppi_descriptor *d; 892 893 for (d = rx->head; d; d = d->next) 894 cppi_dump_rxbd("S", d); 895 } 896#endif 897 898 /* in case the preceding transfer left some state... */ 899 tail = rx->last_processed; 900 if (tail) { 901 tail->next = bd; 902 tail->hw_next = bd->dma; 903 } 904 905 core_rxirq_enable(tibase, rx->index + 1); 906 907 /* BDs live in DMA-coherent memory, but writes might be pending */ 908 cpu_drain_writebuffer(); 909 910 /* REVISIT specs say to write this AFTER the BUFCNT register 911 * below ... but that loses badly. 912 */ 913 musb_writel(&rx_ram->rx_head, 0, bd->dma); 914 915 /* bufferCount must be at least 3, and zeroes on completion 916 * unless it underflows below zero, or stops at two, or keeps 917 * growing ... grr. 918 */ 919 i = musb_readl(tibase, 920 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) 921 & 0xffff; 922 923 if (!i) 924 musb_writel(tibase, 925 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), 926 n_bds + 2); 927 else if (n_bds > (i - 3)) 928 musb_writel(tibase, 929 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), 930 n_bds - (i - 3)); 931 932 i = musb_readl(tibase, 933 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) 934 & 0xffff; 935 if (i < (2 + n_bds)) { 936 DBG(2, "bufcnt%d underrun - %d (for %d)\n", 937 rx->index, i, n_bds); 938 musb_writel(tibase, 939 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), 940 n_bds + 2); 941 } 942 943 cppi_dump_rx(4, rx, "/S"); 944} 945 946/** 947 * cppi_channel_program - program channel for data transfer 948 * @ch: the channel 949 * @maxpacket: max packet size 950 * @mode: For RX, 1 unless the usb protocol driver promised to treat 951 * all short reads as errors and kick in high level fault recovery. 952 * For TX, ignored because of RNDIS mode races/glitches. 953 * @dma_addr: dma address of buffer 954 * @len: length of buffer 955 * Context: controller irqlocked 956 */ 957static int cppi_channel_program(struct dma_channel *ch, 958 u16 maxpacket, u8 mode, 959 dma_addr_t dma_addr, u32 len) 960{ 961 struct cppi_channel *cppi_ch; 962 struct cppi *controller; 963 struct musb *musb; 964 965 cppi_ch = container_of(ch, struct cppi_channel, channel); 966 controller = cppi_ch->controller; 967 musb = controller->musb; 968 969 switch (ch->status) { 970 case MUSB_DMA_STATUS_BUS_ABORT: 971 case MUSB_DMA_STATUS_CORE_ABORT: 972 /* fault irq handler should have handled cleanup */ 973 WARNING("%cX DMA%d not cleaned up after abort!\n", 974 cppi_ch->transmit ? 'T' : 'R', 975 cppi_ch->index); 976 /* WARN_ON(1); */ 977 break; 978 case MUSB_DMA_STATUS_BUSY: 979 WARNING("program active channel? %cX DMA%d\n", 980 cppi_ch->transmit ? 'T' : 'R', 981 cppi_ch->index); 982 /* WARN_ON(1); */ 983 break; 984 case MUSB_DMA_STATUS_UNKNOWN: 985 DBG(1, "%cX DMA%d not allocated!\n", 986 cppi_ch->transmit ? 'T' : 'R', 987 cppi_ch->index); 988 /* FALLTHROUGH */ 989 case MUSB_DMA_STATUS_FREE: 990 break; 991 } 992 993 ch->status = MUSB_DMA_STATUS_BUSY; 994 995 /* set transfer parameters, then queue up its first segment */ 996 cppi_ch->buf_dma = dma_addr; 997 cppi_ch->offset = 0; 998 cppi_ch->maxpacket = maxpacket; 999 cppi_ch->buf_len = len; 1000 cppi_ch->channel.actual_len = 0; 1001 1002 /* TX channel? or RX? */ 1003 if (cppi_ch->transmit) 1004 cppi_next_tx_segment(musb, cppi_ch); 1005 else 1006 cppi_next_rx_segment(musb, cppi_ch, mode); 1007 1008 return true; 1009} 1010 1011static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) 1012{ 1013 struct cppi_channel *rx = &cppi->rx[ch]; 1014 struct cppi_rx_stateram __iomem *state = rx->state_ram; 1015 struct cppi_descriptor *bd; 1016 struct cppi_descriptor *last = rx->last_processed; 1017 bool completed = false; 1018 bool acked = false; 1019 int i; 1020 dma_addr_t safe2ack; 1021 void __iomem *regs = rx->hw_ep->regs; 1022 1023 cppi_dump_rx(6, rx, "/K"); 1024 1025 bd = last ? last->next : rx->head; 1026 if (!bd) 1027 return false; 1028 1029 /* run through all completed BDs */ 1030 for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0); 1031 (safe2ack || completed) && bd && i < NUM_RXCHAN_BD; 1032 i++, bd = bd->next) { 1033 u16 len; 1034 1035 /* catch latest BD writes from CPPI */ 1036 rmb(); 1037 if (!completed && (bd->hw_options & CPPI_OWN_SET)) 1038 break; 1039 1040 DBG(5, "C/RXBD %08x: nxt %08x buf %08x " 1041 "off.len %08x opt.len %08x (%d)\n", 1042 bd->dma, bd->hw_next, bd->hw_bufp, 1043 bd->hw_off_len, bd->hw_options, 1044 rx->channel.actual_len); 1045 1046 /* actual packet received length */ 1047 if ((bd->hw_options & CPPI_SOP_SET) && !completed) 1048 len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK; 1049 else 1050 len = 0; 1051 1052 if (bd->hw_options & CPPI_EOQ_MASK) 1053 completed = true; 1054 1055 if (!completed && len < bd->buflen) { 1056 /* NOTE: when we get a short packet, RXCSR_H_REQPKT 1057 * must have been cleared, and no more DMA packets may 1058 * active be in the queue... TI docs didn't say, but 1059 * CPPI ignores those BDs even though OWN is still set. 1060 */ 1061 completed = true; 1062 DBG(3, "rx short %d/%d (%d)\n", 1063 len, bd->buflen, 1064 rx->channel.actual_len); 1065 } 1066 1067 /* If we got here, we expect to ack at least one BD; meanwhile 1068 * CPPI may completing other BDs while we scan this list... 1069 * 1070 * RACE: we can notice OWN cleared before CPPI raises the 1071 * matching irq by writing that BD as the completion pointer. 1072 * In such cases, stop scanning and wait for the irq, avoiding 1073 * lost acks and states where BD ownership is unclear. 1074 */ 1075 if (bd->dma == safe2ack) { 1076 musb_writel(&state->rx_complete, 0, safe2ack); 1077 safe2ack = musb_readl(&state->rx_complete, 0); 1078 acked = true; 1079 if (bd->dma == safe2ack) 1080 safe2ack = 0; 1081 } 1082 1083 rx->channel.actual_len += len; 1084 1085 cppi_bd_free(rx, last); 1086 last = bd; 1087 1088 /* stop scanning on end-of-segment */ 1089 if (bd->hw_next == 0) 1090 completed = true; 1091 } 1092 rx->last_processed = last; 1093 1094 /* dma abort, lost ack, or ... */ 1095 if (!acked && last) { 1096 int csr; 1097 1098 if (safe2ack == 0 || safe2ack == rx->last_processed->dma) 1099 musb_writel(&state->rx_complete, 0, safe2ack); 1100 if (safe2ack == 0) { 1101 cppi_bd_free(rx, last); 1102 rx->last_processed = NULL; 1103 1104 /* if we land here on the host side, H_REQPKT will 1105 * be clear and we need to restart the queue... 1106 */ 1107 WARN_ON(rx->head); 1108 } 1109 musb_ep_select(cppi->mregs, rx->index + 1); 1110 csr = musb_readw(regs, MUSB_RXCSR); 1111 if (csr & MUSB_RXCSR_DMAENAB) { 1112 DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", 1113 rx->index, 1114 rx->head, rx->tail, 1115 rx->last_processed 1116 ? rx->last_processed->dma 1117 : 0, 1118 completed ? ", completed" : "", 1119 csr); 1120 cppi_dump_rxq(4, "/what?", rx); 1121 } 1122 } 1123 if (!completed) { 1124 int csr; 1125 1126 rx->head = bd; 1127 1128 /* REVISIT seems like "autoreq all but EOP" doesn't... 1129 * setting it here "should" be racey, but seems to work 1130 */ 1131 csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); 1132 if (is_host_active(cppi->musb) 1133 && bd 1134 && !(csr & MUSB_RXCSR_H_REQPKT)) { 1135 csr |= MUSB_RXCSR_H_REQPKT; 1136 musb_writew(regs, MUSB_RXCSR, 1137 MUSB_RXCSR_H_WZC_BITS | csr); 1138 csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); 1139 } 1140 } else { 1141 rx->head = NULL; 1142 rx->tail = NULL; 1143 } 1144 1145 cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned"); 1146 return completed; 1147} 1148 1149irqreturn_t cppi_interrupt(int irq, void *dev_id) 1150{ 1151 struct musb *musb = dev_id; 1152 struct cppi *cppi; 1153 void __iomem *tibase; 1154 struct musb_hw_ep *hw_ep = NULL; 1155 u32 rx, tx; 1156 int i, index; 1157 1158 cppi = container_of(musb->dma_controller, struct cppi, controller); 1159 1160 tibase = musb->ctrl_base; 1161 1162 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); 1163 rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); 1164 1165 if (!tx && !rx) 1166 return IRQ_NONE; 1167 1168 DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); 1169 1170 /* process TX channels */ 1171 for (index = 0; tx; tx = tx >> 1, index++) { 1172 struct cppi_channel *tx_ch; 1173 struct cppi_tx_stateram __iomem *tx_ram; 1174 bool completed = false; 1175 struct cppi_descriptor *bd; 1176 1177 if (!(tx & 1)) 1178 continue; 1179 1180 tx_ch = cppi->tx + index; 1181 tx_ram = tx_ch->state_ram; 1182 1183 /* FIXME need a cppi_tx_scan() routine, which 1184 * can also be called from abort code 1185 */ 1186 1187 cppi_dump_tx(5, tx_ch, "/E"); 1188 1189 bd = tx_ch->head; 1190 1191 if (NULL == bd) { 1192 DBG(1, "null BD\n"); 1193 continue; 1194 } 1195 1196 /* run through all completed BDs */ 1197 for (i = 0; !completed && bd && i < NUM_TXCHAN_BD; 1198 i++, bd = bd->next) { 1199 u16 len; 1200 1201 /* catch latest BD writes from CPPI */ 1202 rmb(); 1203 if (bd->hw_options & CPPI_OWN_SET) 1204 break; 1205 1206 DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n", 1207 bd, bd->hw_next, bd->hw_bufp, 1208 bd->hw_off_len, bd->hw_options); 1209 1210 len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK; 1211 tx_ch->channel.actual_len += len; 1212 1213 tx_ch->last_processed = bd; 1214 1215 /* write completion register to acknowledge 1216 * processing of completed BDs, and possibly 1217 * release the IRQ; EOQ might not be set ... 1218 * 1219 * REVISIT use the same ack strategy as rx 1220 * 1221 * REVISIT have observed bit 18 set; huh?? 1222 */ 1223 /* if ((bd->hw_options & CPPI_EOQ_MASK)) */ 1224 musb_writel(&tx_ram->tx_complete, 0, bd->dma); 1225 1226 /* stop scanning on end-of-segment */ 1227 if (bd->hw_next == 0) 1228 completed = true; 1229 } 1230 1231 /* on end of segment, maybe go to next one */ 1232 if (completed) { 1233 /* cppi_dump_tx(4, tx_ch, "/complete"); */ 1234 1235 /* transfer more, or report completion */ 1236 if (tx_ch->offset >= tx_ch->buf_len) { 1237 tx_ch->head = NULL; 1238 tx_ch->tail = NULL; 1239 tx_ch->channel.status = MUSB_DMA_STATUS_FREE; 1240 1241 hw_ep = tx_ch->hw_ep; 1242 1243 musb_dma_completion(musb, index + 1, 1); 1244 1245 } else { 1246 /* Bigger transfer than we could fit in 1247 * that first batch of descriptors... 1248 */ 1249 cppi_next_tx_segment(musb, tx_ch); 1250 } 1251 } else 1252 tx_ch->head = bd; 1253 } 1254 1255 /* Start processing the RX block */ 1256 for (index = 0; rx; rx = rx >> 1, index++) { 1257 1258 if (rx & 1) { 1259 struct cppi_channel *rx_ch; 1260 1261 rx_ch = cppi->rx + index; 1262 1263 /* let incomplete dma segments finish */ 1264 if (!cppi_rx_scan(cppi, index)) 1265 continue; 1266 1267 /* start another dma segment if needed */ 1268 if (rx_ch->channel.actual_len != rx_ch->buf_len 1269 && rx_ch->channel.actual_len 1270 == rx_ch->offset) { 1271 cppi_next_rx_segment(musb, rx_ch, 1); 1272 continue; 1273 } 1274 1275 /* all segments completed! */ 1276 rx_ch->channel.status = MUSB_DMA_STATUS_FREE; 1277 1278 hw_ep = rx_ch->hw_ep; 1279 1280 core_rxirq_disable(tibase, index + 1); 1281 musb_dma_completion(musb, index + 1, 0); 1282 } 1283 } 1284 1285 /* write to CPPI EOI register to re-enable interrupts */ 1286 musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); 1287 1288 return IRQ_HANDLED; 1289} 1290 1291/* Instantiate a software object representing a DMA controller. */ 1292struct dma_controller *__init 1293dma_controller_create(struct musb *musb, void __iomem *mregs) 1294{ 1295 struct cppi *controller; 1296 struct device *dev = musb->controller; 1297 struct platform_device *pdev = to_platform_device(dev); 1298 int irq = platform_get_irq(pdev, 1); 1299 1300 controller = kzalloc(sizeof *controller, GFP_KERNEL); 1301 if (!controller) 1302 return NULL; 1303 1304 controller->mregs = mregs; 1305 controller->tibase = mregs - DAVINCI_BASE_OFFSET; 1306 1307 controller->musb = musb; 1308 controller->controller.start = cppi_controller_start; 1309 controller->controller.stop = cppi_controller_stop; 1310 controller->controller.channel_alloc = cppi_channel_allocate; 1311 controller->controller.channel_release = cppi_channel_release; 1312 controller->controller.channel_program = cppi_channel_program; 1313 controller->controller.channel_abort = cppi_channel_abort; 1314 1315 /* NOTE: allocating from on-chip SRAM would give the least 1316 * contention for memory access, if that ever matters here. 1317 */ 1318 1319 /* setup BufferPool */ 1320 controller->pool = dma_pool_create("cppi", 1321 controller->musb->controller, 1322 sizeof(struct cppi_descriptor), 1323 CPPI_DESCRIPTOR_ALIGN, 0); 1324 if (!controller->pool) { 1325 kfree(controller); 1326 return NULL; 1327 } 1328 1329 if (irq > 0) { 1330 if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) { 1331 dev_err(dev, "request_irq %d failed!\n", irq); 1332 dma_controller_destroy(&controller->controller); 1333 return NULL; 1334 } 1335 controller->irq = irq; 1336 } 1337 1338 return &controller->controller; 1339} 1340 1341/* 1342 * Destroy a previously-instantiated DMA controller. 1343 */ 1344void dma_controller_destroy(struct dma_controller *c) 1345{ 1346 struct cppi *cppi; 1347 1348 cppi = container_of(c, struct cppi, controller); 1349 1350 if (cppi->irq) 1351 free_irq(cppi->irq, cppi->musb); 1352 1353 /* assert: caller stopped the controller first */ 1354 dma_pool_destroy(cppi->pool); 1355 1356 kfree(cppi); 1357} 1358 1359/* 1360 * Context: controller irqlocked, endpoint selected 1361 */ 1362static int cppi_channel_abort(struct dma_channel *channel) 1363{ 1364 struct cppi_channel *cppi_ch; 1365 struct cppi *controller; 1366 void __iomem *mbase; 1367 void __iomem *tibase; 1368 void __iomem *regs; 1369 u32 value; 1370 struct cppi_descriptor *queue; 1371 1372 cppi_ch = container_of(channel, struct cppi_channel, channel); 1373 1374 controller = cppi_ch->controller; 1375 1376 switch (channel->status) { 1377 case MUSB_DMA_STATUS_BUS_ABORT: 1378 case MUSB_DMA_STATUS_CORE_ABORT: 1379 /* from RX or TX fault irq handler */ 1380 case MUSB_DMA_STATUS_BUSY: 1381 /* the hardware needs shutting down */ 1382 regs = cppi_ch->hw_ep->regs; 1383 break; 1384 case MUSB_DMA_STATUS_UNKNOWN: 1385 case MUSB_DMA_STATUS_FREE: 1386 return 0; 1387 default: 1388 return -EINVAL; 1389 } 1390 1391 if (!cppi_ch->transmit && cppi_ch->head) 1392 cppi_dump_rxq(3, "/abort", cppi_ch); 1393 1394 mbase = controller->mregs; 1395 tibase = controller->tibase; 1396 1397 queue = cppi_ch->head; 1398 cppi_ch->head = NULL; 1399 cppi_ch->tail = NULL; 1400 1401 /* REVISIT should rely on caller having done this, 1402 * and caller should rely on us not changing it. 1403 * peripheral code is safe ... check host too. 1404 */ 1405 musb_ep_select(mbase, cppi_ch->index + 1); 1406 1407 if (cppi_ch->transmit) { 1408 struct cppi_tx_stateram __iomem *tx_ram; 1409 int enabled; 1410 1411 /* mask interrupts raised to signal teardown complete. */ 1412 enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG) 1413 & (1 << cppi_ch->index); 1414 if (enabled) 1415 musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, 1416 (1 << cppi_ch->index)); 1417 1418 /* REVISIT put timeouts on these controller handshakes */ 1419 1420 cppi_dump_tx(6, cppi_ch, " (teardown)"); 1421 1422 /* teardown DMA engine then usb core */ 1423 do { 1424 value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG); 1425 } while (!(value & CPPI_TEAR_READY)); 1426 musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index); 1427 1428 tx_ram = cppi_ch->state_ram; 1429 do { 1430 value = musb_readl(&tx_ram->tx_complete, 0); 1431 } while (0xFFFFFFFC != value); 1432 musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC); 1433 1434 /* FIXME clean up the transfer state ... here? 1435 * the completion routine should get called with 1436 * an appropriate status code. 1437 */ 1438 1439 value = musb_readw(regs, MUSB_TXCSR); 1440 value &= ~MUSB_TXCSR_DMAENAB; 1441 value |= MUSB_TXCSR_FLUSHFIFO; 1442 musb_writew(regs, MUSB_TXCSR, value); 1443 musb_writew(regs, MUSB_TXCSR, value); 1444 1445 /* While we scrub the TX state RAM, ensure that we clean 1446 * up any interrupt that's currently asserted: 1447 * 1. Write to completion Ptr value 0x1(bit 0 set) 1448 * (write back mode) 1449 * 2. Write to completion Ptr value 0x0(bit 0 cleared) 1450 * (compare mode) 1451 * Value written is compared(for bits 31:2) and when 1452 * equal, interrupt is deasserted. 1453 */ 1454 cppi_reset_tx(tx_ram, 1); 1455 musb_writel(&tx_ram->tx_complete, 0, 0); 1456 1457 /* re-enable interrupt */ 1458 if (enabled) 1459 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, 1460 (1 << cppi_ch->index)); 1461 1462 cppi_dump_tx(5, cppi_ch, " (done teardown)"); 1463 1464 /* REVISIT tx side _should_ clean up the same way 1465 * as the RX side ... this does no cleanup at all! 1466 */ 1467 1468 } else /* RX */ { 1469 u16 csr; 1470 1471 /* NOTE: docs don't guarantee any of this works ... we 1472 * expect that if the usb core stops telling the cppi core 1473 * to pull more data from it, then it'll be safe to flush 1474 * current RX DMA state iff any pending fifo transfer is done. 1475 */ 1476 1477 core_rxirq_disable(tibase, cppi_ch->index + 1); 1478 1479 /* for host, ensure ReqPkt is never set again */ 1480 if (is_host_active(cppi_ch->controller->musb)) { 1481 value = musb_readl(tibase, DAVINCI_AUTOREQ_REG); 1482 value &= ~((0x3) << (cppi_ch->index * 2)); 1483 musb_writel(tibase, DAVINCI_AUTOREQ_REG, value); 1484 } 1485 1486 csr = musb_readw(regs, MUSB_RXCSR); 1487 1488 /* for host, clear (just) ReqPkt at end of current packet(s) */ 1489 if (is_host_active(cppi_ch->controller->musb)) { 1490 csr |= MUSB_RXCSR_H_WZC_BITS; 1491 csr &= ~MUSB_RXCSR_H_REQPKT; 1492 } else 1493 csr |= MUSB_RXCSR_P_WZC_BITS; 1494 1495 /* clear dma enable */ 1496 csr &= ~(MUSB_RXCSR_DMAENAB); 1497 musb_writew(regs, MUSB_RXCSR, csr); 1498 csr = musb_readw(regs, MUSB_RXCSR); 1499 1500 /* Quiesce: wait for current dma to finish (if not cleanup). 1501 * We can't use bit zero of stateram->rx_sop, since that 1502 * refers to an entire "DMA packet" not just emptying the 1503 * current fifo. Most segments need multiple usb packets. 1504 */ 1505 if (channel->status == MUSB_DMA_STATUS_BUSY) 1506 udelay(50); 1507 1508 /* scan the current list, reporting any data that was 1509 * transferred and acking any IRQ 1510 */ 1511 cppi_rx_scan(controller, cppi_ch->index); 1512 1513 /* clobber the existing state once it's idle 1514 * 1515 * NOTE: arguably, we should also wait for all the other 1516 * RX channels to quiesce (how??) and then temporarily 1517 * disable RXCPPI_CTRL_REG ... but it seems that we can 1518 * rely on the controller restarting from state ram, with 1519 * only RXCPPI_BUFCNT state being bogus. BUFCNT will 1520 * correct itself after the next DMA transfer though. 1521 * 1522 * REVISIT does using rndis mode change that? 1523 */ 1524 cppi_reset_rx(cppi_ch->state_ram); 1525 1526 /* next DMA request _should_ load cppi head ptr */ 1527 1528 /* ... we don't "free" that list, only mutate it in place. */ 1529 cppi_dump_rx(5, cppi_ch, " (done abort)"); 1530 1531 /* clean up previously pending bds */ 1532 cppi_bd_free(cppi_ch, cppi_ch->last_processed); 1533 cppi_ch->last_processed = NULL; 1534 1535 while (queue) { 1536 struct cppi_descriptor *tmp = queue->next; 1537 1538 cppi_bd_free(cppi_ch, queue); 1539 queue = tmp; 1540 } 1541 } 1542 1543 channel->status = MUSB_DMA_STATUS_FREE; 1544 cppi_ch->buf_dma = 0; 1545 cppi_ch->offset = 0; 1546 cppi_ch->buf_len = 0; 1547 cppi_ch->maxpacket = 0; 1548 return 0; 1549} 1550 1551/* TBD Queries: 1552 * 1553 * Power Management ... probably turn off cppi during suspend, restart; 1554 * check state ram? Clocking is presumably shared with usb core. 1555 */