/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/usb.h>

#include "musb_core.h"
#include "cppi_dma.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD	64
#define NUM_RXCHAN_BD	64

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
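
/* NOTE: these two helpers maintain a simple per-channel LIFO freelist,
 * threaded through cppi_descriptor->next.  Nothing here takes a lock;
 * callers presumably rely on running with the controller irqlocked, as
 * the "Context:" comments later in this file suggest.
 */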

/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		/* give up quietly if the pool is exhausted */
		if (!bd)
			break;
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}
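
/* NOTE: each channel gets NUM_TXCHAN_BD + 1 descriptors, one more than
 * it ever queues at once; the spare presumably covers the descriptor
 * parked in last_processed between RX scans.  RX channels reuse the TX
 * count here, which only works because NUM_RXCHAN_BD == NUM_TXCHAN_BD.
 */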

static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}

static int __init cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}

/*
 *  Stop DMA controller
 *
 *  De-Init the DMA controller as necessary.
 */

static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	tibase = controller->tibase;
	/* disable individual channel interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	DBG(1, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* Only TX has a proper teardown protocol; we therefore disable
	 * TX/RX CPPI only after the TX channels are cleaned up, since
	 * TX CPPI cannot be disabled before TX teardown is complete.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}

/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}


/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		DBG(1, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		DBG(1, "releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
			"%08x H%08x S%08x C%08x, "
			"B%08x L%08x %08x .. %08x"
			"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "TX DMA%d%s: csr %04x, "
			"H%08x S%08x C%08x %08x, "
			"F%08x L%08x .. %08x"
			"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}

/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}
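
/* NOTE: as the shifts above imply, DAVINCI_RNDIS_REG holds one RNDIS-mode
 * enable bit per channel:  TX channel N in bit N, RX channel N in bit
 * N + 16.
 */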

static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#if MUSB_DEBUG > 0
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
#endif
}


/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}


/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *    more correctly.
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model.
 * ... the goal will be to append to the
 * existing queue, processing completed "dma buffers" (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave too
 * (except how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
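
/* To make the TX mode choice concrete (illustrative numbers only):  with
 * 512 byte bulk packets, a 1000 byte transfer passes all three tests in
 * cppi_next_tx_segment() below -- maxpacket is a multiple of 64, length
 * is under 0xffff, and length is not an exact multiple of maxpacket --
 * so it goes out as one RNDIS mode BD.  A 1024 byte transfer fails the
 * last test (it's the "send a ZLP?" case described above), so it's built
 * as two maxpacket-sized "transparent" BDs instead.
 */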

static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}

/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both packets
 *   are loaded into the buffer (with a 212 byte gap between them), and the
 *   next buffer queued will NOT get its 300 bytes of data.  (It seems like
 *   SOP/EOP are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
 *   reliably make both cases work, with software handling both cases correctly
 *   and at the significant penalty of needing an IRQ per packet.  (The lack of
 *   I/O overlap can be slightly ameliorated by enabling double buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */
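
/* In tabular form, for scenario (a) 300+300 and (b) 512+512 into that
 * 1KB buffer (summarizing the discussion above):
 *
 *	mode			(a) short packets	(b) full packets
 *	rndis, one BD		works			wedges, silently
 *	transparent, two BDs	corrupts next buffer	works
 *	transparent, one BD	works			works, irq/packet
 */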

/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a) peripheral mode ... since rndis peripherals could pad their
 *      writes to hosts, causing i/o failure; or we'd have to cope with
 *      a largely unknowable variety of host side protocol variants
 *  (b) and short reads are NOT errors ... since full reads would
 *      cause those same i/o failures
 *  (c) and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
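
/* Example (illustrative only):  a 1536 byte read with 512 byte packets
 * on a peripheral that accepts short reads passes every test -- longer
 * than one packet, under 64KB, not a multiple of 4096, and an exact
 * multiple of maxpacket -- so cppi_next_rx_segment() below will try RX
 * RNDIS mode with a single 1536 byte BD.  An 8192 byte read fails the
 * "not a multiple of 4096" test and stays in irq-per-packet
 * "transparent" mode.
 */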
static int cppi_rx_rndis = 1;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
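
/* Assuming the usual build, where this file is part of the musb_hdrc
 * module, the heuristic can be disabled at load time with something
 * like:  modprobe musb_hdrc cppi_rx_rndis=0
 */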

/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes. So:  multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%x len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			addr, length, rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

	if (debug >= 5) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}

/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;

	/* TX channel?  or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
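
/* In outline, how the musb core is expected to drive this engine (a
 * sketch; see musb_host.c, musb_gadget.c, and the DaVinci glue for the
 * real call sites):
 *
 *	ch = dma->channel_alloc(dma, hw_ep, is_tx);	// cppi_channel_allocate
 *	dma->channel_program(ch, maxpacket, mode,	// cppi_channel_program
 *			dma_addr, length);
 *	// ... the glue's interrupt handler decodes the CPPI irq and
 *	// calls cppi_completion(musb, rx, tx), where bit N of the
 *	// rx/tx masks flags RX/TX channel N as needing service ...
 *	dma->channel_release(ch);			// cppi_channel_release
 *
 * with dma->channel_abort(ch) available for early termination.
 */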

static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets
			 * may be active in the queue ... TI docs didn't say,
			 * but CPPI ignores those BDs even though OWN is
			 * still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racy, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}

void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXPKTRDY ...
					 */
					int	csr;

					csr = musb_readw(hw_ep->regs,
							MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel	*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}

/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi	*controller;

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	return &controller->controller;
}
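
/* NOTE: the caller of dma_controller_create() is expected to pair it
 * with dma_controller_destroy() below, and -- per the assert there --
 * to stop the controller before destroying it.
 */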

/*
 *  Destroy a previously-instantiated DMA controller.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	/* assert:  caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}

/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		int	enabled;

		/* mask interrupts raised to signal teardown complete.  */
		enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
				& (1 << cppi_ch->index);
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
					(1 << cppi_ch->index));

		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);
		musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/* re-enable interrupt */
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
					(1 << cppi_ch->index));

		/* While we scrub the TX state RAM, ensure that we clean
		 * up any interrupt that's currently asserted:
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
		 *    (compare mode)
		 * Value written is compared (for bits 31:2) and when
		 * equal, interrupt is deasserted.
1443 */ 1444 cppi_reset_tx(tx_ram, 1); 1445 musb_writel(&tx_ram->tx_complete, 0, 0); 1446 1447 cppi_dump_tx(5, cppi_ch, " (done teardown)"); 1448 1449 /* REVISIT tx side _should_ clean up the same way 1450 * as the RX side ... this does no cleanup at all! 1451 */ 1452 1453 } else /* RX */ { 1454 u16 csr; 1455 1456 /* NOTE: docs don't guarantee any of this works ... we 1457 * expect that if the usb core stops telling the cppi core 1458 * to pull more data from it, then it'll be safe to flush 1459 * current RX DMA state iff any pending fifo transfer is done. 1460 */ 1461 1462 core_rxirq_disable(tibase, cppi_ch->index + 1); 1463 1464 /* for host, ensure ReqPkt is never set again */ 1465 if (is_host_active(cppi_ch->controller->musb)) { 1466 value = musb_readl(tibase, DAVINCI_AUTOREQ_REG); 1467 value &= ~((0x3) << (cppi_ch->index * 2)); 1468 musb_writel(tibase, DAVINCI_AUTOREQ_REG, value); 1469 } 1470 1471 csr = musb_readw(regs, MUSB_RXCSR); 1472 1473 /* for host, clear (just) ReqPkt at end of current packet(s) */ 1474 if (is_host_active(cppi_ch->controller->musb)) { 1475 csr |= MUSB_RXCSR_H_WZC_BITS; 1476 csr &= ~MUSB_RXCSR_H_REQPKT; 1477 } else 1478 csr |= MUSB_RXCSR_P_WZC_BITS; 1479 1480 /* clear dma enable */ 1481 csr &= ~(MUSB_RXCSR_DMAENAB); 1482 musb_writew(regs, MUSB_RXCSR, csr); 1483 csr = musb_readw(regs, MUSB_RXCSR); 1484 1485 /* Quiesce: wait for current dma to finish (if not cleanup). 1486 * We can't use bit zero of stateram->rx_sop, since that 1487 * refers to an entire "DMA packet" not just emptying the 1488 * current fifo. Most segments need multiple usb packets. 1489 */ 1490 if (channel->status == MUSB_DMA_STATUS_BUSY) 1491 udelay(50); 1492 1493 /* scan the current list, reporting any data that was 1494 * transferred and acking any IRQ 1495 */ 1496 cppi_rx_scan(controller, cppi_ch->index); 1497 1498 /* clobber the existing state once it's idle 1499 * 1500 * NOTE: arguably, we should also wait for all the other 1501 * RX channels to quiesce (how??) and then temporarily 1502 * disable RXCPPI_CTRL_REG ... but it seems that we can 1503 * rely on the controller restarting from state ram, with 1504 * only RXCPPI_BUFCNT state being bogus. BUFCNT will 1505 * correct itself after the next DMA transfer though. 1506 * 1507 * REVISIT does using rndis mode change that? 1508 */ 1509 cppi_reset_rx(cppi_ch->state_ram); 1510 1511 /* next DMA request _should_ load cppi head ptr */ 1512 1513 /* ... we don't "free" that list, only mutate it in place. */ 1514 cppi_dump_rx(5, cppi_ch, " (done abort)"); 1515 1516 /* clean up previously pending bds */ 1517 cppi_bd_free(cppi_ch, cppi_ch->last_processed); 1518 cppi_ch->last_processed = NULL; 1519 1520 while (queue) { 1521 struct cppi_descriptor *tmp = queue->next; 1522 1523 cppi_bd_free(cppi_ch, queue); 1524 queue = tmp; 1525 } 1526 } 1527 1528 channel->status = MUSB_DMA_STATUS_FREE; 1529 cppi_ch->buf_dma = 0; 1530 cppi_ch->offset = 0; 1531 cppi_ch->buf_len = 0; 1532 cppi_ch->maxpacket = 0; 1533 return 0; 1534} 1535 1536/* TBD Queries: 1537 * 1538 * Power Management ... probably turn off cppi during suspend, restart; 1539 * check state ram? Clocking is presumably shared with usb core. 1540 */